Spaces:
No application file
No application file
TrBn17
committed on
Commit
·
80dbe44
0
Parent(s):
reset repo without Dataset.xlsx
Browse files- .gitattributes +36 -0
- README.md +10 -0
- source/.dockerignore +20 -0
- source/.gitattributes +37 -0
- source/.gitignore +13 -0
- source/.gradio/certificate.pem +31 -0
- source/.vscode/settings.json +4 -0
- source/Dockerfile +16 -0
- source/README.md +140 -0
- source/app.py +1332 -0
- source/conversation_cache.json +909 -0
- source/docker-compose.yml +57 -0
- source/logs/daily_20250814.log +4 -0
- source/logs/daily_20250814.log.2025-08-14 +334 -0
- source/logs/errors.log +1 -0
- source/logs/transportation.log +358 -0
- source/requirements.txt +42 -0
- source/requirements_hf.txt +8 -0
- source/requirements_simple.txt +12 -0
- source/src/app/__init__.py +0 -0
- source/src/app/api/__init__.py +0 -0
- source/src/app/api/chat.py +129 -0
- source/src/app/api/predict.py +145 -0
- source/src/app/schema/__init__.py +0 -0
- source/src/app/schema/chat.py +29 -0
- source/src/app/schema/transportation.py +30 -0
- source/src/config/__init__.py +0 -0
- source/src/config/logging_config.py +168 -0
- source/src/config/setting.py +61 -0
- source/src/database/__init__.py +0 -0
- source/src/database/db.py +0 -0
- source/src/domain/__init__.py +0 -0
- source/src/domain/functions/get_options_function.json +8 -0
- source/src/domain/functions/predict_function.json +42 -0
- source/src/domain/node/call_llm.py +124 -0
- source/src/domain/node/input.py +98 -0
- source/src/domain/node/save_cache.py +103 -0
- source/src/domain/transportation_graph.py +28 -0
- source/src/main.py +41 -0
- source/src/state/graph_state.py +47 -0
- source/start.bat +19 -0
- source/start.sh +29 -0
.gitattributes
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.xlsx filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Transportation
|
| 3 |
+
emoji: 🚀
|
| 4 |
+
colorFrom: red
|
| 5 |
+
colorTo: indigo
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
source/.dockerignore
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git
|
| 2 |
+
.gitignore
|
| 3 |
+
.gitattributes
|
| 4 |
+
.vscode
|
| 5 |
+
.gradio
|
| 6 |
+
logs/
|
| 7 |
+
__pycache__/
|
| 8 |
+
*.pyc
|
| 9 |
+
*.pyo
|
| 10 |
+
*.pyd
|
| 11 |
+
.Python
|
| 12 |
+
env
|
| 13 |
+
venv
|
| 14 |
+
.env
|
| 15 |
+
.DS_Store
|
| 16 |
+
Thumbs.db
|
| 17 |
+
docker-compose.yml
|
| 18 |
+
start.sh
|
| 19 |
+
start.bat
|
| 20 |
+
README.md
|
source/.gitattributes
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
src/models/*.json filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*.xlsx filter=lfs diff=lfs merge=lfs -text
|
source/.gitignore
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python environment & cache
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*.egg-info/
|
| 5 |
+
*.egg
|
| 6 |
+
|
| 7 |
+
# Virtual environments
|
| 8 |
+
.env
|
| 9 |
+
.venv/
|
| 10 |
+
|
| 11 |
+
# Models
|
| 12 |
+
src/models/
|
| 13 |
+
*.xlsx
|
source/.gradio/certificate.pem
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
| 3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
| 4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
| 5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
| 6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
| 7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
| 8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
| 9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
| 10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
| 11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
| 12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
| 13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
| 14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
| 15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
| 16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
| 17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
| 18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
| 19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
| 20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
| 21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
| 22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
| 23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
| 24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
| 25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
| 26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
| 27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
| 28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
| 29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
| 30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
| 31 |
+
-----END CERTIFICATE-----
|
source/.vscode/settings.json
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"python-envs.defaultEnvManager": "ms-python.python:venv",
|
| 3 |
+
"python-envs.pythonProjects": []
|
| 4 |
+
}
|
source/Dockerfile
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
|
| 2 |
+
# you will also find guides on how best to write your Dockerfile
|
| 3 |
+
|
| 4 |
+
FROM python:3.11
|
| 5 |
+
|
| 6 |
+
RUN useradd -m -u 1000 user
|
| 7 |
+
USER user
|
| 8 |
+
ENV PATH="/home/user/.local/bin:$PATH"
|
| 9 |
+
|
| 10 |
+
WORKDIR /app
|
| 11 |
+
|
| 12 |
+
COPY --chown=user ./requirements_simple.txt requirements.txt
|
| 13 |
+
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
| 14 |
+
|
| 15 |
+
COPY --chown=user . /app
|
| 16 |
+
CMD ["python", "app.py"]
|
source/README.md
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Transportation AI System
|
| 3 |
+
emoji: 🚚
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: green
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
license: mit
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
An AI-powered system to predict optimal transportation modes for logistics and supply chain management, deployable via Docker.
|
| 12 |
+
|
| 13 |
+
## Quick Start with Docker
|
| 14 |
+
|
| 15 |
+
### Option 1: Frontend Only (Gradio UI)
|
| 16 |
+
|
| 17 |
+
```bash
|
| 18 |
+
# Build and run frontend only
|
| 19 |
+
docker build -t transportation-ai .
|
| 20 |
+
docker run -p 7860:7860 transportation-ai
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
### Option 2: Backend + Frontend (Recommended)
|
| 24 |
+
|
| 25 |
+
```bash
|
| 26 |
+
# Run both services with docker-compose
|
| 27 |
+
docker-compose up -d
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
### Option 3: All-in-one Container
|
| 31 |
+
|
| 32 |
+
```bash
|
| 33 |
+
# Run both services in single container
|
| 34 |
+
docker-compose --profile all-in-one up
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
## Development Setup
|
| 38 |
+
|
| 39 |
+
### Local Development
|
| 40 |
+
|
| 41 |
+
```bash
|
| 42 |
+
# Install dependencies
|
| 43 |
+
pip install -r requirements.txt
|
| 44 |
+
|
| 45 |
+
# Start backend API
|
| 46 |
+
python -m src.main
|
| 47 |
+
|
| 48 |
+
# Start frontend (in another terminal)
|
| 49 |
+
python app.py
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
### Windows Users
|
| 53 |
+
|
| 54 |
+
```batch
|
| 55 |
+
# Use the batch script
|
| 56 |
+
start.bat
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
### Linux/Mac Users
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
# Use the shell script
|
| 63 |
+
chmod +x start.sh
|
| 64 |
+
./start.sh
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
## Features
|
| 68 |
+
|
| 69 |
+
### Transportation Prediction
|
| 70 |
+
|
| 71 |
+
- Predict optimal shipping methods (Air, Air Charter, Ocean, Truck)
|
| 72 |
+
- Display confidence scores and alternatives
|
| 73 |
+
- Interactive probability distribution charts
|
| 74 |
+
- Automatic weight and cost estimation
|
| 75 |
+
|
| 76 |
+
### AI Chat Assistant
|
| 77 |
+
|
| 78 |
+
- Chat about transportation and logistics
|
| 79 |
+
- Get insights on shipping methods
|
| 80 |
+
- Compare different transportation modes
|
| 81 |
+
- Cost analysis and optimization tips
|
| 82 |
+
|
| 83 |
+
## How to Use
|
| 84 |
+
|
| 85 |
+
1. **Prediction Tab**: Enter shipment details to get AI recommendations
|
| 86 |
+
2. **Chat Tab**: Ask questions about transportation and logistics
|
| 87 |
+
|
| 88 |
+
## Docker Services
|
| 89 |
+
|
| 90 |
+
### Backend API (Port 3454)
|
| 91 |
+
|
| 92 |
+
- FastAPI server with prediction endpoints
|
| 93 |
+
- Loads models from Hugging Face
|
| 94 |
+
- REST API documentation at `/docs`
|
| 95 |
+
|
| 96 |
+
### Frontend UI (Port 7860)
|
| 97 |
+
|
| 98 |
+
- Gradio interface
|
| 99 |
+
- Real-time streaming chat
|
| 100 |
+
- Interactive prediction forms
|
| 101 |
+
|
| 102 |
+
## Technical Details
|
| 103 |
+
|
| 104 |
+
- **Model**: XGBoost trained on logistics data from Hugging Face
|
| 105 |
+
- **Input Features**: Project code, country, price, vendor, weight, etc.
|
| 106 |
+
- **Output**: Transportation mode with confidence score
|
| 107 |
+
- **Framework**: FastAPI + Gradio + scikit-learn + XGBoost
|
| 108 |
+
- **Deployment**: Docker + Docker Compose
|
| 109 |
+
|
| 110 |
+
## Sample Questions for Chat
|
| 111 |
+
|
| 112 |
+
- "Compare Air vs Ocean transportation"
|
| 113 |
+
- "What affects shipping costs?"
|
| 114 |
+
- "When should I use truck transport?"
|
| 115 |
+
- "Optimize logistics for my company"
|
| 116 |
+
|
| 117 |
+
## Configuration
|
| 118 |
+
|
| 119 |
+
### Environment Variables
|
| 120 |
+
|
| 121 |
+
```env
|
| 122 |
+
GEMINI_API_KEY=your_gemini_api_key
|
| 123 |
+
ACCESS=your_huggingface_token
|
| 124 |
+
API_BASE_URL=http://localhost:3454/api
|
| 125 |
+
```
|
| 126 |
+
|
| 127 |
+
### Docker Compose Services
|
| 128 |
+
|
| 129 |
+
- `backend`: FastAPI server (port 3454)
|
| 130 |
+
- `frontend`: Gradio UI (port 7860)
|
| 131 |
+
- `app`: All-in-one service (both ports)
|
| 132 |
+
|
| 133 |
+
## API Endpoints
|
| 134 |
+
|
| 135 |
+
- `GET /` - API status
|
| 136 |
+
- `POST /api/predict-transportation` - Prediction
|
| 137 |
+
- `GET /api/transportation-options` - Available options
|
| 138 |
+
- `POST /api/chat` - AI chat (streaming)
|
| 139 |
+
|
| 140 |
+
Built with Docker, FastAPI, Gradio and XGBoost
|
source/app.py
ADDED
|
@@ -0,0 +1,1332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Modern Gradio Application for Transportation Prediction
|
| 3 |
+
Uses Gradio 5.x best practices with clean architecture
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import gradio as gr
|
| 7 |
+
import requests
|
| 8 |
+
import json
|
| 9 |
+
import asyncio
|
| 10 |
+
import aiohttp
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from typing import Optional, Dict, Any, List, AsyncGenerator
|
| 13 |
+
import os
|
| 14 |
+
from dotenv import load_dotenv
|
| 15 |
+
|
| 16 |
+
# Load environment variables
|
| 17 |
+
load_dotenv()
|
| 18 |
+
|
| 19 |
+
# Configuration
|
| 20 |
+
API_BASE_URL = os.getenv("API_BASE_URL", "http://localhost:3454/api")
|
| 21 |
+
PREDICT_URL = f"{API_BASE_URL}/predict-transportation"
|
| 22 |
+
OPTIONS_URL = f"{API_BASE_URL}/transportation-options"
|
| 23 |
+
CHAT_URL = f"{API_BASE_URL}/chat"
|
| 24 |
+
|
| 25 |
+
# Check if running on HF Spaces
|
| 26 |
+
IS_HF_SPACES = os.getenv("SPACE_ID") is not None
|
| 27 |
+
|
| 28 |
+
class APIClient:
|
| 29 |
+
"""Unified API client for all endpoints"""
|
| 30 |
+
|
| 31 |
+
def __init__(self):
    """Set up the HTTP session, the option cache and the optional local predictor."""
    # Shared HTTP session for every backend call. NOTE(review): assigning
    # ``timeout`` on a requests.Session has no built-in effect — presumably
    # meant as a default for callers; confirm against usage.
    self.session = requests.Session()
    self.session.timeout = 30
    self.has_backend = False
    self.local_predictor = None

    # Options are fetched first so the UI dropdowns can be populated
    # immediately; this also probes whether a backend is reachable.
    self.options_cache = self._load_options()

    # Prepare an in-process predictor as a fallback for standalone mode.
    self._init_local_predictor()
| 43 |
+
def _init_local_predictor(self):
|
| 44 |
+
"""Initialize local predictor for standalone mode"""
|
| 45 |
+
try:
|
| 46 |
+
from src.app.api.predict import TransportationPredictor
|
| 47 |
+
self.local_predictor = TransportationPredictor()
|
| 48 |
+
print("✅ Local predictor initialized successfully")
|
| 49 |
+
except Exception as e:
|
| 50 |
+
print(f"⚠️ Could not initialize local predictor: {e}")
|
| 51 |
+
print("Will use fallback prediction method")
|
| 52 |
+
self.local_predictor = None
|
| 53 |
+
|
| 54 |
+
def _load_options(self) -> Dict[str, List[str]]:
|
| 55 |
+
"""Load available options from API"""
|
| 56 |
+
try:
|
| 57 |
+
response = self.session.get(OPTIONS_URL)
|
| 58 |
+
response.raise_for_status()
|
| 59 |
+
self.has_backend = True
|
| 60 |
+
return response.json()
|
| 61 |
+
except Exception:
|
| 62 |
+
self.has_backend = False
|
| 63 |
+
return {
|
| 64 |
+
"shipment_modes": ["Air", "Air Charter", "Ocean", "Truck"],
|
| 65 |
+
"sample_vendors": ["ABBOTT LABORATORIES", "PFIZER", "MERCK"],
|
| 66 |
+
"sample_countries": ["Vietnam", "Thailand", "Indonesia"],
|
| 67 |
+
"sample_projects": ["100-CI-T01", "200-MA-T02", "300-VN-T03"]
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
def predict_transportation(self, **kwargs) -> str:
    """Validate the input and route the prediction to the best available engine.

    Routing order: backend API -> local predictor -> error message.
    Returns a markdown/HTML string ready for display in the UI.
    """
    try:
        # Every one of these must be present and truthy before predicting.
        for field in ("project_code", "country", "pack_price", "vendor"):
            if not kwargs.get(field):
                return f"❌ **Error:** Thiếu thông tin bắt buộc: {field}"

        if kwargs.get("pack_price", 0) <= 0:
            return "❌ **Error:** Giá gói phải lớn hơn 0"

        if self.has_backend:
            # Preferred path: backend API.
            return self._predict_via_api(**kwargs)
        if getattr(self, "local_predictor", None):
            # Standalone path (e.g. HF Spaces without a backend).
            return self._predict_via_local(**kwargs)
        return "❌ **Error:** No prediction service available"
    except Exception as e:
        return f"❌ **Unexpected Error:** {str(e)}"
| 94 |
+
def _predict_via_api(self, **kwargs) -> str:
    """Build the request payload and call the backend prediction endpoint.

    Bug fix: the original condition
        ``value and (... and value > 0) or field == "delivery_date"``
    parsed as ``(value and ...) or field == "delivery_date"`` because ``and``
    binds tighter than ``or``, so ``"delivery_date": None`` was always sent
    even when the caller omitted it. Optional fields are now included only
    when actually provided (and positive, for the numeric ones).
    """
    request_data = {
        "project_code": kwargs["project_code"],
        "country": kwargs["country"],
        "pack_price": float(kwargs["pack_price"]),
        "vendor": kwargs["vendor"],
    }

    # Numeric optionals must be positive to be meaningful.
    for field in ("weight_kg", "freight_cost_usd", "line_item_quantity"):
        value = kwargs.get(field)
        if value and value > 0:
            request_data[field] = value
    # delivery_date only needs to be set (non-empty).
    if kwargs.get("delivery_date"):
        request_data["delivery_date"] = kwargs["delivery_date"]

    # Make the API call and render the JSON result for display.
    response = self.session.post(PREDICT_URL, json=request_data)
    response.raise_for_status()
    return self._format_prediction_result(response.json())
| 118 |
+
def _predict_via_local(self, **kwargs) -> str:
|
| 119 |
+
"""Predict via local predictor"""
|
| 120 |
+
if self.local_predictor:
|
| 121 |
+
try:
|
| 122 |
+
from src.app.schema.transportation import TransportationRequest
|
| 123 |
+
|
| 124 |
+
# Create request object
|
| 125 |
+
request = TransportationRequest(
|
| 126 |
+
project_code=kwargs["project_code"],
|
| 127 |
+
country=kwargs["country"],
|
| 128 |
+
pack_price=float(kwargs["pack_price"]),
|
| 129 |
+
vendor=kwargs["vendor"],
|
| 130 |
+
weight_kg=kwargs.get("weight_kg"),
|
| 131 |
+
freight_cost_usd=kwargs.get("freight_cost_usd"),
|
| 132 |
+
delivery_date=kwargs.get("delivery_date"),
|
| 133 |
+
line_item_quantity=kwargs.get("line_item_quantity")
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
# Get prediction
|
| 137 |
+
result = self.local_predictor.predict_shipment_mode(request)
|
| 138 |
+
|
| 139 |
+
# Convert response to dict format
|
| 140 |
+
result_dict = {
|
| 141 |
+
"predicted_shipment_mode": result.predicted_shipment_mode,
|
| 142 |
+
"confidence_score": result.confidence_score,
|
| 143 |
+
"alternative_modes": result.alternative_modes,
|
| 144 |
+
"estimated_weight_kg": result.estimated_weight_kg,
|
| 145 |
+
"estimated_freight_cost_usd": result.estimated_freight_cost_usd,
|
| 146 |
+
"encoded_features": result.encoded_features
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
return self._format_prediction_result(result_dict)
|
| 150 |
+
except Exception as e:
|
| 151 |
+
print(f"Local predictor error: {e}")
|
| 152 |
+
return self._fallback_prediction(**kwargs)
|
| 153 |
+
else:
|
| 154 |
+
return self._fallback_prediction(**kwargs)
|
| 155 |
+
|
| 156 |
+
def _fallback_prediction(self, **kwargs) -> str:
|
| 157 |
+
"""Simple rule-based fallback prediction"""
|
| 158 |
+
pack_price = float(kwargs.get("pack_price", 0))
|
| 159 |
+
weight = kwargs.get("weight_kg") or 10.0 # Default weight
|
| 160 |
+
country = kwargs.get("country", "").lower()
|
| 161 |
+
|
| 162 |
+
# Simple rules
|
| 163 |
+
if pack_price > 100 or "urgent" in country:
|
| 164 |
+
mode = "Air"
|
| 165 |
+
confidence = 0.75
|
| 166 |
+
elif weight > 100 or pack_price < 30:
|
| 167 |
+
mode = "Ocean"
|
| 168 |
+
confidence = 0.70
|
| 169 |
+
elif any(c in country for c in ["vietnam", "thailand", "singapore"]):
|
| 170 |
+
mode = "Truck"
|
| 171 |
+
confidence = 0.65
|
| 172 |
+
else:
|
| 173 |
+
mode = "Air Charter"
|
| 174 |
+
confidence = 0.60
|
| 175 |
+
|
| 176 |
+
# Create fake result for display
|
| 177 |
+
result_dict = {
|
| 178 |
+
"predicted_shipment_mode": mode,
|
| 179 |
+
"confidence_score": confidence,
|
| 180 |
+
"alternative_modes": [
|
| 181 |
+
{"mode": "Ocean", "probability": 0.25},
|
| 182 |
+
{"mode": "Truck", "probability": 0.15}
|
| 183 |
+
],
|
| 184 |
+
"estimated_weight_kg": weight,
|
| 185 |
+
"estimated_freight_cost_usd": pack_price * 0.1,
|
| 186 |
+
"encoded_features": {
|
| 187 |
+
"Project_Code": 1,
|
| 188 |
+
"Country": 1,
|
| 189 |
+
"Vendor": 1
|
| 190 |
+
}
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
return self._format_prediction_result(result_dict) + """
|
| 194 |
+
|
| 195 |
+
<div style="background: #fef3c7; padding: 1rem; border-radius: 8px; border-left: 4px solid #f59e0b; margin: 1rem 0;">
|
| 196 |
+
<p style="margin: 0; color: #92400e;"><strong>⚠️ Demo Mode:</strong> Đang sử dụng rule-based prediction. Kết quả chỉ mang tính chất tham khảo.</p>
|
| 197 |
+
</div>
|
| 198 |
+
"""
|
| 199 |
+
|
| 200 |
+
    def _format_prediction_result(self, result: Dict) -> str:
        """Format prediction result for display with beautiful markdown.

        Args:
            result: predictor output dict; recognized keys are
                ``predicted_shipment_mode``, ``confidence_score``,
                ``alternative_modes`` (list of {"mode", "probability"}),
                ``estimated_weight_kg``, ``estimated_freight_cost_usd``
                and ``encoded_features``. All keys are optional.

        Returns:
            An HTML/markdown string assembled section by section; sections
            whose source keys are absent/falsy are omitted.
        """
        confidence = result.get('confidence_score', 0)
        mode = result.get('predicted_shipment_mode', 'Unknown')

        # Emoji for transport modes (unknown modes fall back to 🚚 below)
        mode_emoji = {
            'Air': '✈️', 'Air Charter': '🛩️', 'Ocean': '🚢',
            'Truck': '🚛', 'Rail': '🚆', 'Express': '📦'
        }

        # Confidence color coding: green >= 0.8, amber >= 0.6, red below
        conf_color = "#10b981" if confidence >= 0.8 else "#f59e0b" if confidence >= 0.6 else "#ef4444"
        conf_emoji = "🎯" if confidence >= 0.8 else "⚠️" if confidence >= 0.6 else "❌"

        # Header card: predicted mode + confidence percentage
        output = f"""
<div style="background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%); padding: 1.5rem; border-radius: 12px; border-left: 4px solid {conf_color}; margin: 1rem 0;">

## {mode_emoji.get(mode, '🚚')} **Kết quả dự đoán vận chuyển**

<div style="display: flex; justify-content: space-between; align-items: center; background: white; padding: 1rem; border-radius: 8px; margin: 1rem 0; box-shadow: 0 2px 10px rgba(0,0,0,0.1);">
<div>
<h3 style="margin: 0; color: #1e293b;">🎯 <strong>Phương thức đề xuất</strong></h3>
<p style="font-size: 1.4rem; font-weight: 700; color: {conf_color}; margin: 0.5rem 0;">{mode_emoji.get(mode, '🚚')} {mode}</p>
</div>
<div style="text-align: right;">
<h3 style="margin: 0; color: #1e293b;">{conf_emoji} <strong>Độ tin cậy</strong></h3>
<p style="font-size: 1.4rem; font-weight: 700; color: {conf_color}; margin: 0.5rem 0;">{confidence:.1%}</p>
</div>
</div>
"""

        # Add estimates if available (weight and/or freight cost cards)
        if result.get('estimated_weight_kg') or result.get('estimated_freight_cost_usd'):
            output += """
### 📊 **Ước tính chi phí & trọng lượng**

<div style="display: grid; grid-template-columns: 1fr 1fr; gap: 1rem; margin: 1rem 0;">
"""
            if result.get('estimated_weight_kg'):
                weight = result['estimated_weight_kg']
                output += f"""
<div style="background: white; padding: 1rem; border-radius: 8px; text-align: center; box-shadow: 0 2px 8px rgba(0,0,0,0.1);">
<div style="font-size: 2rem;">⚖️</div>
<div style="font-weight: 600; color: #1e293b;">Khối lượng</div>
<div style="font-size: 1.2rem; font-weight: 700; color: #0ea5e9;">{weight:.1f} kg</div>
</div>
"""

            if result.get('estimated_freight_cost_usd'):
                cost = result['estimated_freight_cost_usd']
                output += f"""
<div style="background: white; padding: 1rem; border-radius: 8px; text-align: center; box-shadow: 0 2px 8px rgba(0,0,0,0.1);">
<div style="font-size: 2rem;">💰</div>
<div style="font-weight: 600; color: #1e293b;">Chi phí vận chuyển</div>
<div style="font-size: 1.2rem; font-weight: 700; color: #10b981;">${cost:.2f}</div>
</div>
"""
            output += "</div>\n"

        # Add alternatives with beautiful styling (probability bars)
        alternatives = result.get('alternative_modes', [])
        if alternatives:
            output += """
### 🔄 **Các lựa chọn khác**

<div style="background: white; padding: 1rem; border-radius: 8px; margin: 1rem 0; box-shadow: 0 2px 8px rgba(0,0,0,0.1);">
"""
            for i, alt in enumerate(alternatives[:3]):  # Show top 3 alternatives
                alt_mode = alt.get('mode', 'Unknown')
                alt_prob = alt.get('probability', 0)
                alt_emoji = mode_emoji.get(alt_mode, '📦')

                # Bar width in percent; color cycles per rank
                bar_width = int(alt_prob * 100)
                bar_color = "#8b5cf6" if i == 0 else "#06b6d4" if i == 1 else "#84cc16"

                output += f"""
<div style="margin: 0.5rem 0; padding: 0.5rem;">
<div style="display: flex; justify-content: space-between; align-items: center; margin-bottom: 0.25rem;">
<span style="font-weight: 600;">{alt_emoji} {alt_mode}</span>
<span style="font-weight: 700; color: {bar_color};">{alt_prob:.1%}</span>
</div>
<div style="background: #e2e8f0; height: 8px; border-radius: 4px; overflow: hidden;">
<div style="background: {bar_color}; height: 100%; width: {bar_width}%; transition: width 0.5s ease;"></div>
</div>
</div>
"""
            output += "</div>\n"

        # Add project details in a clean format (one tile per encoded feature)
        if result.get('encoded_features'):
            output += """
### 🔧 **Chi tiết dự án**

<div style="background: white; padding: 1rem; border-radius: 8px; margin: 1rem 0; box-shadow: 0 2px 8px rgba(0,0,0,0.1);">
<div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 1rem;">
"""
            features = result['encoded_features']
            feature_icons = {
                'Project_Code': '🏷️', 'Country': '🌍', 'Vendor': '🏢',
                'Pack_Price': '💰', 'Weight': '⚖️', 'Delivery_Date': '📅'
            }

            for key, value in features.items():
                icon = feature_icons.get(key, '📊')
                display_key = key.replace('_', ' ').title()
                output += f"""
<div style="text-align: center; padding: 0.5rem;">
<div style="font-size: 1.5rem; margin-bottom: 0.25rem;">{icon}</div>
<div style="font-size: 0.85rem; color: #64748b; font-weight: 500;">{display_key}</div>
<div style="font-weight: 600; color: #1e293b;">{value}</div>
</div>
"""
            output += """
</div>
</div>
"""

        # Footer: close the wrapper div and add a branding strip
        output += """
</div>

<div style="text-align: center; margin-top: 1rem; padding: 1rem; background: linear-gradient(135deg, #e0f2fe 0%, #f0f9ff 100%); border-radius: 8px;">
<p style="margin: 0; color: #0369a1; font-weight: 600;">✨ Dự đoán được tạo bởi AI với độ chính xác cao</p>
</div>
"""

        return output
|
| 327 |
+
|
| 328 |
+
async def chat_stream(self, message: str) -> AsyncGenerator[str, None]:
|
| 329 |
+
"""Stream chat with API backend or fallback to local chat"""
|
| 330 |
+
if not message.strip():
|
| 331 |
+
yield "Vui lòng nhập tin nhắn"
|
| 332 |
+
return
|
| 333 |
+
|
| 334 |
+
# Try backend API first
|
| 335 |
+
if self.has_backend:
|
| 336 |
+
try:
|
| 337 |
+
async for chunk in self._chat_via_api(message):
|
| 338 |
+
yield chunk
|
| 339 |
+
return
|
| 340 |
+
except Exception as e:
|
| 341 |
+
yield f"❌ Backend error, switching to local mode: {str(e)}"
|
| 342 |
+
|
| 343 |
+
# Fallback to local chat
|
| 344 |
+
async for chunk in self._chat_local(message):
|
| 345 |
+
yield chunk
|
| 346 |
+
|
| 347 |
+
    async def _chat_via_api(self, message: str) -> AsyncGenerator[str, None]:
        """Chat via backend API.

        Streams the backend's Server-Sent-Events response from CHAT_URL,
        yielding the accumulated assistant text after each "delta" event so
        the UI can re-render the full message. Raises a plain Exception on
        a non-200 response so chat_stream can fall back to local mode.
        """
        # Fresh session per call; 60 s budget covers the whole stream.
        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=60)) as session:
            request_data = {"message": message}

            async with session.post(CHAT_URL, json=request_data) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise Exception(f"API Error {response.status}: {error_text}")

                accumulated_text = ""

                # SSE framing: each payload line is prefixed with "data: ".
                async for line in response.content:
                    line_str = line.decode('utf-8').strip()

                    if line_str.startswith("data: "):
                        data_str = line_str[6:].strip()

                        # Sentinel emitted by the backend at end of stream.
                        if data_str == "[DONE]":
                            break

                        try:
                            chunk_data = json.loads(data_str)
                            event_type = chunk_data.get("event")

                            if event_type == "delta":
                                # Incremental tokens: yield the running total,
                                # not just the delta.
                                content = chunk_data.get("content", "")
                                accumulated_text += content
                                yield accumulated_text

                            elif event_type == "status":
                                # Progress/error notifications from the backend.
                                stage = chunk_data.get("stage", "")
                                message_text = chunk_data.get("message", "")
                                if stage == "starting":
                                    yield f"🔄 {message_text}"
                                elif stage == "error":
                                    yield f"❌ Error: {message_text}"

                            elif event_type == "final":
                                # Prefer the backend's final text; otherwise use
                                # what we accumulated from deltas.
                                final_content = chunk_data.get("content", accumulated_text)
                                if final_content and final_content.strip():
                                    yield final_content
                                else:
                                    yield accumulated_text or "✅ Đã xử lý xong"

                        except json.JSONDecodeError:
                            # Skip malformed or partial SSE payloads.
                            continue

                # Ensure we have some output even if no delta ever arrived.
                if not accumulated_text.strip():
                    yield "✅ Đã xử lý xong"
|
| 398 |
+
|
| 399 |
+
    async def _chat_local(self, message: str) -> AsyncGenerator[str, None]:
        """Local chat fallback for HF Spaces.

        Keyword-matches the user's message against a few canned
        transportation topics and simulates token streaming by yielding
        growing prefixes of the canned answer.
        """
        yield "🤖 Đang suy nghĩ..."
        await asyncio.sleep(1)

        # Simple rule-based responses for demo (case-insensitive matching)
        message_lower = message.lower()

        if any(word in message_lower for word in ["air", "máy bay", "hàng không"]):
            response = """
## ✈️ Air Transportation

**Ưu điểm:**
- Tốc độ nhanh nhất (1-3 ngày)
- Phù hợp hàng hóa có giá trị cao
- An toàn và bảo mật tốt

**Nhược điểm:**
- Chi phí cao nhất
- Hạn chế về trọng lượng và kích thước
- Phụ thuộc thời tiết

**Khi nào nên dùng:**
- Hàng hóa khẩn cấp
- Sản phẩm có giá trị cao/kg
- Khoảng cách xa (intercontinental)
"""
        elif any(word in message_lower for word in ["ocean", "biển", "tàu"]):
            response = """
## 🚢 Ocean Transportation

**Ưu điểm:**
- Chi phí thấp nhất cho hàng lớn
- Có thể vận chuyển khối lượng lớn
- Thân thiện môi trường

**Nhược điểm:**
- Thời gian lâu (2-6 tuần)
- Chịu ảnh hưởng thời tiết biển
- Cần cảng biển

**Khi nào nên dùng:**
- Hàng hóa không cấp bách
- Khối lượng lớn, giá trị thấp/kg
- Vận chuyển container
"""
        elif any(word in message_lower for word in ["truck", "xe tải", "đường bộ"]):
            response = """
## 🚛 Truck Transportation

**Ưu điểm:**
- Linh hoạt door-to-door
- Kiểm soát tốt thời gian
- Phù hợp khoảng cách ngắn-trung

**Nhược điểm:**
- Hạn chế về khoảng cách xa
- Ảnh hưởng giao thông, thời tiết
- Chi phí cao với khoảng cách xa

**Khi nào nên dùng:**
- Vận chuyển nội địa/khu vực
- Hàng cần giao nhanh
- Không có cảng/sân bay gần
"""
        elif any(word in message_lower for word in ["dự đoán", "predict", "model"]):
            response = """
## 🔮 AI Prediction Model

Hệ thống sử dụng **XGBoost** để dự đoán phương thức vận chuyển tối ưu dựa trên:

**Input Features:**
- 🏷️ Project Code
- 🌍 Destination Country
- 💰 Pack Price
- 🏢 Vendor
- ⚖️ Weight (optional)
- 🚢 Freight Cost (optional)

**Model Performance:**
- Độ chính xác: ~90%
- Hỗ trợ 4 modes: Air, Air Charter, Ocean, Truck
- Real-time prediction với confidence score
"""
        else:
            # Generic fallback that echoes the user's question.
            response = f"""
## 💡 Transportation Assistant

Xin chào! Tôi có thể giúp bạn về:

🔮 **Dự đoán vận chuyển** - Phân tích phương thức tối ưu
📊 **So sánh modes** - Air vs Ocean vs Truck
💰 **Tối ưu chi phí** - Cân bằng thời gian và chi phí
🌍 **Logistics quốc tế** - Quy trình xuất nhập khẩu

**Câu hỏi của bạn:** "{message}"

Hãy hỏi cụ thể hơn về Air, Ocean, Truck transportation hoặc prediction model!
"""

        # Simulate streaming: emit growing prefixes of the canned answer.
        words = response.split()
        current_text = ""

        for i, word in enumerate(words):
            current_text += word + " "
            if i % 3 == 0:  # Update every 3 words
                yield current_text
                # NOTE(review): indentation of the original sleep is ambiguous in
                # the source dump; assumed inside the if (sleep only when yielding).
                await asyncio.sleep(0.1)

        yield response  # Final complete response
|
| 510 |
+
|
| 511 |
+
# Global API client shared by all UI tabs; constructed once at import time.
api_client = APIClient()
|
| 513 |
+
|
| 514 |
+
def create_prediction_tab():
    """Create transportation prediction tab.

    Builds the input form (project, country, price, vendor plus optional
    weight/cost/date/quantity), a results panel, and wires the Predict
    button to ``api_client.predict_transportation``. Must be called inside
    a gr.Blocks context.
    """
    # Dropdown choices cached by the APIClient at startup.
    options = api_client.options_cache

    with gr.Column(elem_classes="prediction-container"):
        # Tab header banner
        gr.HTML("""
        <div style="text-align: center; padding: 2rem; background: linear-gradient(135deg, #059669 0%, #10b981 50%, #34d399 100%); color: white; border-radius: 16px; margin-bottom: 2rem; box-shadow: 0 6px 24px rgba(5,150,105,0.2);">
            <div style="display: flex; align-items: center; justify-content: center; gap: 1rem; margin-bottom: 0.5rem;">
                <div style="background: rgba(255,255,255,0.2); padding: 0.5rem; border-radius: 8px;">
                    <span style="font-size: 1.5rem;">🚚</span>
                </div>
                <h2 style="margin: 0; font-size: 1.8rem; font-weight: 700; text-shadow: 0 2px 4px rgba(0,0,0,0.2);">Smart Transportation Prediction</h2>
            </div>
            <p style="margin: 0; opacity: 0.95; font-size: 1rem; font-weight: 400;">AI-powered phương thức vận chuyển tối ưu cho đơn hàng của bạn</p>
        </div>
        """)

        with gr.Row(equal_height=True):
            # Input form (left column)
            with gr.Column(scale=5, elem_classes="input-form"):
                with gr.Group():
                    # NOTE: heading emoji was a mojibake "�" in the original; restored to 📦.
                    gr.Markdown("### 📦 **Thông tin đơn hàng**")

                    with gr.Row():
                        project_code = gr.Dropdown(
                            choices=options.get("sample_projects", []),
                            label="🏷️ Project Code",
                            allow_custom_value=True,
                            value="100-CI-T01",
                            info="Mã dự án vận chuyển",
                            elem_classes="input-field"
                        )

                        country = gr.Dropdown(
                            choices=options.get("sample_countries", []),
                            label="🌍 Destination Country",
                            allow_custom_value=True,
                            value="Vietnam",
                            info="Quốc gia đích của đơn hàng",
                            elem_classes="input-field"
                        )

                    with gr.Row():
                        pack_price = gr.Number(
                            label="💰 Pack Price (USD)",
                            value=50.0,
                            minimum=0.01,
                            info="Giá mỗi gói hàng (USD)",
                            elem_classes="input-field"
                        )

                        vendor = gr.Dropdown(
                            choices=options.get("sample_vendors", []),
                            label="🏢 Vendor",
                            allow_custom_value=True,
                            value="ABBOTT LABORATORIES",
                            info="Nhà cung cấp hàng hóa",
                            elem_classes="input-field"
                        )

                # Optional advanced inputs
                with gr.Accordion("⚙️ Thông tin chi tiết (Tùy chọn)", open=False):
                    with gr.Row():
                        weight_kg = gr.Number(
                            label="⚖️ Weight (kg)",
                            value=None,
                            minimum=0,
                            info="Khối lượng hàng hóa",
                            elem_classes="input-field"
                        )

                        freight_cost_usd = gr.Number(
                            label="🚢 Freight Cost (USD)",
                            value=None,
                            minimum=0,
                            info="Chi phí vận chuyển ước tính",
                            elem_classes="input-field"
                        )

                    with gr.Row():
                        delivery_date = gr.Textbox(
                            label="📅 Delivery Date",
                            placeholder="YYYY-MM-DD (vd: 2025-08-20)",
                            info="Ngày giao hàng mong muốn",
                            elem_classes="input-field"
                        )

                        line_item_quantity = gr.Number(
                            label="📦 Quantity",
                            value=100.0,
                            minimum=0,
                            info="Số lượng sản phẩm",
                            elem_classes="input-field"
                        )

                # Action button
                with gr.Row():
                    predict_btn = gr.Button(
                        "🔮 Predict Transportation Mode",
                        variant="primary",
                        size="lg",
                        elem_classes="btn-primary",
                        scale=1
                    )

            # Results panel (right column)
            with gr.Column(scale=7, elem_classes="results-panel"):
                gr.Markdown("### 📊 **Kết quả dự đoán**")
                result_output = gr.HTML(
                    value="""
                    <div style="text-align: center; padding: 3rem; background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%); border-radius: 12px; border: 2px dashed #cbd5e1;">
                        <div style="font-size: 3rem; margin-bottom: 1rem;">🎯</div>
                        <h3 style="color: #64748b; margin-bottom: 0.5rem;">Sẵn sàng dự đoán</h3>
                        <p style="color: #94a3b8; margin: 0;">Nhập thông tin đơn hàng và nhấn <strong>Predict</strong> để nhận kết quả AI</p>
                    </div>
                    """,
                    elem_classes="results-display"
                )

        # Adapter: Gradio supplies positional component values; forward them
        # to the API client as keyword arguments in the expected order.
        def predict_wrapper(*args):
            result = api_client.predict_transportation(
                project_code=args[0], country=args[1], pack_price=args[2], vendor=args[3],
                weight_kg=args[4], freight_cost_usd=args[5], delivery_date=args[6],
                line_item_quantity=args[7]
            )
            return result

        predict_btn.click(
            fn=predict_wrapper,
            inputs=[project_code, country, pack_price, vendor,
                    weight_kg, freight_cost_usd, delivery_date, line_item_quantity],
            outputs=result_output
        )

        # Example scenarios that prefill the form when clicked
        with gr.Accordion("💡 Ví dụ thực tế", open=False):
            gr.HTML("""
            <div style="margin-bottom: 1rem;">
                <h4 style="color: #4f46e5; margin-bottom: 0.5rem;">📋 Các trường hợp thường gặp:</h4>
                <p style="color: #64748b; font-size: 0.9rem;">Click vào ví dụ để tự động điền thông tin</p>
            </div>
            """)

            gr.Examples(
                examples=[
                    ["100-CI-T01", "Vietnam", 50.0, "ABBOTT LABORATORIES", None, None, "", 100.0],
                    ["200-MA-T02", "Thailand", 75.0, "PFIZER", 25.0, 500.0, "2025-09-01", 150.0],
                    ["300-VN-T03", "Indonesia", 30.0, "MERCK", None, None, "2025-08-25", 80.0],
                    ["400-SG-T04", "Singapore", 120.0, "JOHNSON & JOHNSON", 15.0, 300.0, "2025-08-30", 200.0],
                    ["500-MY-T05", "Malaysia", 85.0, "NOVARTIS", None, None, "", 75.0]
                ],
                inputs=[project_code, country, pack_price, vendor,
                        weight_kg, freight_cost_usd, delivery_date, line_item_quantity],
                label="🎯 Scenario Templates",
                examples_per_page=5
            )
|
| 673 |
+
|
| 674 |
+
def create_chat_tab():
    """Create AI chat tab with real-time streaming.

    Builds a Chatbot widget, an input row with send/clear buttons, and
    curated example questions. Streaming is driven synchronously by pumping
    ``api_client.chat_stream`` through a dedicated event loop. Must be
    called inside a gr.Blocks context.
    """

    def chat_response_streaming(message, history):
        """Generator handler: appends the user turn, then yields updated
        history as assistant chunks stream in. Yields (history, textbox)."""
        if not message.strip():
            yield history, ""
            return

        # Normalize history for a fresh session.
        if history is None:
            history = []

        # Append the user's turn.
        history.append({
            "role": "user",
            "content": message
        })

        # Placeholder assistant turn, overwritten as chunks arrive.
        history.append({
            "role": "assistant",
            "content": "🤖 Đang suy nghĩ..."
        })

        yield history, ""

        # Pump the async chat stream from this sync generator.
        try:
            import asyncio
            # Gradio handlers run in worker threads without a loop, so we
            # create and own one here.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            accumulated_content = ""

            async def stream_response():
                nonlocal accumulated_content
                async for chunk in api_client.chat_stream(message):
                    accumulated_content = chunk
                    # Replace the last (assistant) message in place.
                    history[-1] = {
                        "role": "assistant",
                        "content": accumulated_content
                    }
                    yield history, ""

            # Drive the async generator step by step, re-yielding each update.
            async_gen = stream_response()
            try:
                while True:
                    result = loop.run_until_complete(async_gen.__anext__())
                    yield result
            except StopAsyncIteration:
                pass
            finally:
                loop.close()

        except Exception as e:
            # Surface the failure in the chat itself.
            history[-1] = {
                "role": "assistant",
                "content": f"❌ **Lỗi:** {str(e)}\n\nVui lòng thử lại sau hoặc kiểm tra kết nối API."
            }
            yield history, ""

    with gr.Column(elem_classes="chat-container"):
        # Chat header banner
        gr.HTML("""
        <div style="text-align: center; padding: 2rem; background: linear-gradient(135deg, #7c3aed 0%, #a855f7 50%, #c084fc 100%); color: white; border-radius: 16px; margin-bottom: 2rem; box-shadow: 0 6px 24px rgba(124,58,237,0.2);">
            <div style="display: flex; align-items: center; justify-content: center; gap: 1rem; margin-bottom: 0.5rem;">
                <div style="background: rgba(255,255,255,0.2); padding: 0.5rem; border-radius: 8px; display: flex; align-items: center; gap: 0.25rem;">
                    <span style="font-size: 1.25rem;">🤖</span>
                    <span style="font-size: 1.25rem;">✨</span>
                </div>
                <h2 style="margin: 0; font-size: 1.8rem; font-weight: 700; text-shadow: 0 2px 4px rgba(0,0,0,0.2);">AI Transportation Assistant</h2>
            </div>
            <p style="margin: 0; opacity: 0.95; font-size: 1rem; font-weight: 400;">Trợ lý thông minh cho logistics và vận chuyển quốc tế</p>
            <div style="margin-top: 1.5rem; display: flex; justify-content: center; gap: 1rem; flex-wrap: wrap;">
                <span style="background: rgba(255,255,255,0.15); padding: 0.5rem 1rem; border-radius: 20px; font-size: 0.85rem; font-weight: 500;">💡 Tư vấn chiến lược</span>
                <span style="background: rgba(255,255,255,0.15); padding: 0.5rem 1rem; border-radius: 20px; font-size: 0.85rem; font-weight: 500;">📊 Phân tích dữ liệu</span>
                <span style="background: rgba(255,255,255,0.15); padding: 0.5rem 1rem; border-radius: 20px; font-size: 0.85rem; font-weight: 500;">🎯 Dự đoán chính xác</span>
            </div>
        </div>
        """)

        # Chat widget (openai-style "messages" history format)
        chatbot = gr.Chatbot(
            label="",
            height=600,
            placeholder="💬 Bắt đầu cuộc trò chuyện với AI assistant. Tôi có thể giúp bạn về:\n\n🔮 Dự đoán phương thức vận chuyển\n📊 Phân tích chi phí logistics\n🌍 Tư vấn vận chuyển quốc tế\n💡 Tối ưu hóa quy trình\n\nHãy hỏi tôi bất cứ điều gì!",
            show_copy_button=True,
            type="messages",
            elem_classes="modern-chatbot",
            avatar_images=(
                "https://cdn-icons-png.flaticon.com/512/149/149071.png",  # User avatar
                "https://cdn-icons-png.flaticon.com/512/4712/4712109.png"  # Bot avatar
            ),
            show_share_button=False
        )

        # Input area
        with gr.Group(elem_classes="chat-input-group"):
            gr.HTML("""
            <div style="background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%); padding: 1rem; border-radius: 12px 12px 0 0; border-bottom: 2px solid #e2e8f0;">
                <div style="display: flex; align-items: center; gap: 0.5rem; color: #64748b; font-size: 0.9rem;">
                    <span>💭</span>
                    <span>Nhập câu hỏi của bạn về logistics, vận chuyển hoặc dự đoán AI...</span>
                </div>
            </div>
            """)

            with gr.Row(elem_classes="chat-input-row"):
                msg = gr.Textbox(
                    label="",
                    placeholder="Ví dụ: 'Phân tích ưu nhược điểm của Air shipping vs Ocean shipping cho hàng hóa 50kg đến Việt Nam'",
                    container=False,
                    scale=5,
                    elem_classes="chat-input",
                    lines=1,
                    max_lines=3
                )
                with gr.Column(scale=1, min_width=120):
                    with gr.Row():
                        # NOTE: label emoji was a mojibake "�" in the original; restored to 🚀.
                        send_btn = gr.Button(
                            "🚀 Gửi",
                            variant="primary",
                            size="lg",
                            elem_classes="send-button"
                        )
                        clear_btn = gr.Button(
                            "🗑️",
                            variant="secondary",
                            size="sm",
                            elem_classes="clear-button"
                        )

        # Event handlers: Enter and the send button both stream; clear resets.
        msg.submit(
            chat_response_streaming,
            [msg, chatbot],
            [chatbot, msg],
            show_progress="hidden"
        )
        send_btn.click(
            chat_response_streaming,
            [msg, chatbot],
            [chatbot, msg],
            show_progress="hidden"
        )
        clear_btn.click(
            lambda: ([], ""),
            outputs=[chatbot, msg],
            show_progress="hidden"
        )

        # Curated example questions, grouped by topic
        with gr.Accordion("💡 Câu hỏi mẫu & Chủ đề phổ biến", open=False):
            gr.HTML("""
            <div style="margin-bottom: 1.5rem; text-align: center;">
                <h4 style="color: #4f46e5; margin-bottom: 0.5rem; font-size: 1.1rem;">🎯 Khám phá các tính năng của AI Assistant</h4>
                <p style="color: #64748b; font-size: 0.9rem;">Click vào câu hỏi để bắt đầu cuộc trò chuyện</p>
            </div>
            """)

            with gr.Row():
                with gr.Column():
                    gr.HTML("<h5 style='color: #059669; margin-bottom: 1rem;'>🔮 Dự đoán & Phân tích</h5>")
                    gr.Examples(
                        examples=[
                            "Dự đoán phương thức vận chuyển cho project 100-CI-T01 với 50kg hàng đến Vietnam",
                            "So sánh chi phí Air vs Ocean shipping cho hàng 25kg đến Thailand",
                            "Phân tích yếu tố nào ảnh hưởng đến việc chọn phương thức vận chuyển",
                            "Tại sao AI lại đề xuất Air shipping thay vì Ocean cho đơn hàng này?"
                        ],
                        inputs=msg,
                        examples_per_page=4
                    )

                with gr.Column():
                    # NOTE: heading emoji was a mojibake "�" in the original; restored to 💼.
                    gr.HTML("<h5 style='color: #dc2626; margin-bottom: 1rem;'>💼 Tư vấn & Chiến lược</h5>")
                    gr.Examples(
                        examples=[
                            "Làm thế nào để tối ưu hóa chi phí logistics cho doanh nghiệp?",
                            "Xu hướng vận chuyển quốc tế hiện tại và tương lai như thế nào?",
                            "Những thách thức chính trong vận chuyển hàng hóa sang ASEAN?",
                            "Cách lựa chọn vendor và partner logistics phù hợp?"
                        ],
                        inputs=msg,
                        examples_per_page=4
                    )

            with gr.Row():
                with gr.Column():
                    gr.HTML("<h5 style='color: #7c2d12; margin-bottom: 1rem;'>🎓 Kiến thức & Học hỏi</h5>")
                    gr.Examples(
                        examples=[
                            "Giải thích các thuật ngữ logistics: FOB, CIF, EXW là gì?",
                            "Quy trình hải quan và giấy tờ cần thiết cho xuất nhập khẩu",
                            "Sự khác biệt giữa Air Charter và Regular Air shipping",
                            "Các yếu tố địa lý ảnh hưởng đến chi phí vận chuyển"
                        ],
                        inputs=msg,
                        examples_per_page=4
                    )

                with gr.Column():
                    gr.HTML("<h5 style='color: #1e40af; margin-bottom: 1rem;'>📊 Dữ liệu & Báo cáo</h5>")
                    gr.Examples(
                        examples=[
                            "Thống kê xu hướng vận chuyển từ dataset hiện tại",
                            "Phân tích mối tương quan giữa trọng lượng và chi phí",
                            "So sánh performance các vendor trong hệ thống",
                            "Dự báo chi phí vận chuyển cho quý tới"
                        ],
                        inputs=msg,
                        examples_per_page=4
                    )

            # Usage tips footer
            gr.HTML("""
            <div style="margin-top: 1.5rem; padding: 1rem; background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%); border-radius: 12px; border-left: 4px solid #f59e0b;">
                <h4 style="margin: 0 0 0.5rem 0; color: #92400e;">💡 Mẹo sử dụng AI Assistant</h4>
                <ul style="margin: 0; color: #92400e; font-size: 0.9rem;">
                    <li>Đưa ra thông tin chi tiết để được tư vấn chính xác hơn</li>
                    <li>Hỏi về các case study cụ thể từ dữ liệu thực tế</li>
                    <li>Yêu cầu so sánh và phân tích đa chiều</li>
                    <li>Chat hỗ trợ streaming real-time cho trải nghiệm mượt mà</li>
                </ul>
            </div>
            """)
|
| 904 |
+
|
| 905 |
+
def create_app():
|
| 906 |
+
"""Create the main Gradio application"""
|
| 907 |
+
|
| 908 |
+
# Enhanced Custom CSS for modern UI
|
| 909 |
+
custom_css = """
|
| 910 |
+
/* Import modern fonts */
|
| 911 |
+
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800&family=JetBrains+Mono:wght@400;500;600&display=swap');
|
| 912 |
+
|
| 913 |
+
.gradio-container {
|
| 914 |
+
max-width: 1400px !important;
|
| 915 |
+
margin: auto;
|
| 916 |
+
font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
|
| 917 |
+
background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
|
| 918 |
+
min-height: 100vh;
|
| 919 |
+
}
|
| 920 |
+
|
| 921 |
+
/* Main Header */
|
| 922 |
+
.main-header {
|
| 923 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
| 924 |
+
color: white;
|
| 925 |
+
padding: 2rem;
|
| 926 |
+
border-radius: 16px;
|
| 927 |
+
margin-bottom: 2rem;
|
| 928 |
+
text-align: center;
|
| 929 |
+
box-shadow: 0 10px 30px rgba(102, 126, 234, 0.3);
|
| 930 |
+
}
|
| 931 |
+
|
| 932 |
+
/* Tab Navigation */
|
| 933 |
+
.tab-nav .tab-nav-button {
|
| 934 |
+
font-weight: 600 !important;
|
| 935 |
+
padding: 0.75rem 1.5rem !important;
|
| 936 |
+
border-radius: 10px !important;
|
| 937 |
+
margin: 0 0.25rem !important;
|
| 938 |
+
transition: all 0.3s ease !important;
|
| 939 |
+
}
|
| 940 |
+
|
| 941 |
+
.tab-nav .tab-nav-button.selected {
|
| 942 |
+
background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%) !important;
|
| 943 |
+
color: white !important;
|
| 944 |
+
box-shadow: 0 4px 15px rgba(79, 70, 229, 0.4) !important;
|
| 945 |
+
}
|
| 946 |
+
|
| 947 |
+
/* Card Styling */
|
| 948 |
+
.block, .gr-box {
|
| 949 |
+
border-radius: 12px !important;
|
| 950 |
+
box-shadow: 0 4px 20px rgba(0,0,0,0.08) !important;
|
| 951 |
+
border: 1px solid #e2e8f0 !important;
|
| 952 |
+
transition: all 0.3s ease !important;
|
| 953 |
+
background: white !important;
|
| 954 |
+
}
|
| 955 |
+
|
| 956 |
+
.block:hover, .gr-box:hover {
|
| 957 |
+
box-shadow: 0 8px 30px rgba(0,0,0,0.12) !important;
|
| 958 |
+
transform: translateY(-2px) !important;
|
| 959 |
+
}
|
| 960 |
+
|
| 961 |
+
/* Button Styling */
|
| 962 |
+
.btn-primary, .gr-button.primary {
|
| 963 |
+
background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%) !important;
|
| 964 |
+
border: none !important;
|
| 965 |
+
border-radius: 10px !important;
|
| 966 |
+
font-weight: 600 !important;
|
| 967 |
+
padding: 0.75rem 1.5rem !important;
|
| 968 |
+
transition: all 0.3s ease !important;
|
| 969 |
+
box-shadow: 0 4px 15px rgba(79, 70, 229, 0.3) !important;
|
| 970 |
+
}
|
| 971 |
+
|
| 972 |
+
.btn-primary:hover, .gr-button.primary:hover {
|
| 973 |
+
transform: translateY(-2px) !important;
|
| 974 |
+
box-shadow: 0 6px 20px rgba(79, 70, 229, 0.4) !important;
|
| 975 |
+
}
|
| 976 |
+
|
| 977 |
+
.send-button {
|
| 978 |
+
background: linear-gradient(135deg, #10b981 0%, #059669 100%) !important;
|
| 979 |
+
border: none !important;
|
| 980 |
+
color: white !important;
|
| 981 |
+
font-weight: 600 !important;
|
| 982 |
+
border-radius: 10px !important;
|
| 983 |
+
padding: 0.75rem 1rem !important;
|
| 984 |
+
transition: all 0.3s ease !important;
|
| 985 |
+
}
|
| 986 |
+
|
| 987 |
+
.send-button:hover {
|
| 988 |
+
transform: translateY(-1px) !important;
|
| 989 |
+
box-shadow: 0 4px 15px rgba(16, 185, 129, 0.4) !important;
|
| 990 |
+
}
|
| 991 |
+
|
| 992 |
+
.clear-button {
|
| 993 |
+
background: linear-gradient(135deg, #ef4444 0%, #dc2626 100%) !important;
|
| 994 |
+
border: none !important;
|
| 995 |
+
color: white !important;
|
| 996 |
+
border-radius: 8px !important;
|
| 997 |
+
padding: 0.5rem !important;
|
| 998 |
+
transition: all 0.3s ease !important;
|
| 999 |
+
}
|
| 1000 |
+
|
| 1001 |
+
/* Input Field Styling */
|
| 1002 |
+
.gr-textbox, .gr-dropdown, .gr-number {
|
| 1003 |
+
border-radius: 10px !important;
|
| 1004 |
+
border: 2px solid #e2e8f0 !important;
|
| 1005 |
+
transition: all 0.3s ease !important;
|
| 1006 |
+
font-family: 'Inter', sans-serif !important;
|
| 1007 |
+
}
|
| 1008 |
+
|
| 1009 |
+
.gr-textbox:focus, .gr-dropdown:focus, .gr-number:focus {
|
| 1010 |
+
border-color: #4f46e5 !important;
|
| 1011 |
+
box-shadow: 0 0 0 3px rgba(79, 70, 229, 0.1) !important;
|
| 1012 |
+
}
|
| 1013 |
+
|
| 1014 |
+
/* Chat Styling */
|
| 1015 |
+
.modern-chatbot {
|
| 1016 |
+
border-radius: 12px !important;
|
| 1017 |
+
background: linear-gradient(135deg, #fafafa 0%, #f3f4f6 100%) !important;
|
| 1018 |
+
border: 1px solid #e2e8f0 !important;
|
| 1019 |
+
}
|
| 1020 |
+
|
| 1021 |
+
.chat-input {
|
| 1022 |
+
border-radius: 12px !important;
|
| 1023 |
+
border: 2px solid #e2e8f0 !important;
|
| 1024 |
+
background: white !important;
|
| 1025 |
+
padding: 1rem !important;
|
| 1026 |
+
font-size: 1rem !important;
|
| 1027 |
+
transition: all 0.3s ease !important;
|
| 1028 |
+
}
|
| 1029 |
+
|
| 1030 |
+
.chat-input:focus {
|
| 1031 |
+
border-color: #6366f1 !important;
|
| 1032 |
+
box-shadow: 0 0 0 3px rgba(99, 102, 241, 0.1) !important;
|
| 1033 |
+
}
|
| 1034 |
+
|
| 1035 |
+
.chat-input-group {
|
| 1036 |
+
background: white !important;
|
| 1037 |
+
border-radius: 12px !important;
|
| 1038 |
+
box-shadow: 0 4px 20px rgba(0,0,0,0.08) !important;
|
| 1039 |
+
border: 1px solid #e2e8f0 !important;
|
| 1040 |
+
overflow: hidden !important;
|
| 1041 |
+
}
|
| 1042 |
+
|
| 1043 |
+
.chat-input-row {
|
| 1044 |
+
padding: 1rem !important;
|
| 1045 |
+
gap: 1rem !important;
|
| 1046 |
+
}
|
| 1047 |
+
|
| 1048 |
+
/* Message Bubbles */
|
| 1049 |
+
.message.user {
|
| 1050 |
+
background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%) !important;
|
| 1051 |
+
color: white !important;
|
| 1052 |
+
border-radius: 18px 18px 4px 18px !important;
|
| 1053 |
+
padding: 1rem 1.25rem !important;
|
| 1054 |
+
margin: 0.5rem 0 !important;
|
| 1055 |
+
max-width: 80% !important;
|
| 1056 |
+
margin-left: auto !important;
|
| 1057 |
+
}
|
| 1058 |
+
|
| 1059 |
+
.message.bot {
|
| 1060 |
+
background: white !important;
|
| 1061 |
+
color: #1e293b !important;
|
| 1062 |
+
border: 1px solid #e2e8f0 !important;
|
| 1063 |
+
border-radius: 18px 18px 18px 4px !important;
|
| 1064 |
+
padding: 1rem 1.25rem !important;
|
| 1065 |
+
margin: 0.5rem 0 !important;
|
| 1066 |
+
max-width: 80% !important;
|
| 1067 |
+
box-shadow: 0 2px 10px rgba(0,0,0,0.1) !important;
|
| 1068 |
+
}
|
| 1069 |
+
|
| 1070 |
+
/* Prediction Results */
|
| 1071 |
+
.prediction-container {
|
| 1072 |
+
background: white !important;
|
| 1073 |
+
border-radius: 12px !important;
|
| 1074 |
+
padding: 1.5rem !important;
|
| 1075 |
+
box-shadow: 0 4px 20px rgba(0,0,0,0.08) !important;
|
| 1076 |
+
border: 1px solid #e2e8f0 !important;
|
| 1077 |
+
}
|
| 1078 |
+
|
| 1079 |
+
/* Input Form */
|
| 1080 |
+
.input-form {
|
| 1081 |
+
background: white !important;
|
| 1082 |
+
border-radius: 12px !important;
|
| 1083 |
+
padding: 1.5rem !important;
|
| 1084 |
+
box-shadow: 0 4px 20px rgba(0,0,0,0.08) !important;
|
| 1085 |
+
border: 1px solid #e2e8f0 !important;
|
| 1086 |
+
}
|
| 1087 |
+
|
| 1088 |
+
.input-field {
|
| 1089 |
+
margin-bottom: 1rem !important;
|
| 1090 |
+
}
|
| 1091 |
+
|
| 1092 |
+
/* Results Panel */
|
| 1093 |
+
.result-panel {
|
| 1094 |
+
background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%) !important;
|
| 1095 |
+
border-radius: 12px !important;
|
| 1096 |
+
padding: 1.5rem !important;
|
| 1097 |
+
border: 1px solid #7dd3fc !important;
|
| 1098 |
+
min-height: 400px !important;
|
| 1099 |
+
}
|
| 1100 |
+
|
| 1101 |
+
/* Examples */
|
| 1102 |
+
.example-group {
|
| 1103 |
+
background: white !important;
|
| 1104 |
+
border-radius: 8px !important;
|
| 1105 |
+
padding: 1rem !important;
|
| 1106 |
+
border: 1px solid #e2e8f0 !important;
|
| 1107 |
+
margin-bottom: 1rem !important;
|
| 1108 |
+
}
|
| 1109 |
+
|
| 1110 |
+
.example-group .gr-button {
|
| 1111 |
+
background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%) !important;
|
| 1112 |
+
border: 1px solid #cbd5e1 !important;
|
| 1113 |
+
color: #475569 !important;
|
| 1114 |
+
border-radius: 8px !important;
|
| 1115 |
+
padding: 0.75rem 1rem !important;
|
| 1116 |
+
margin: 0.25rem !important;
|
| 1117 |
+
transition: all 0.3s ease !important;
|
| 1118 |
+
font-size: 0.9rem !important;
|
| 1119 |
+
text-align: left !important;
|
| 1120 |
+
}
|
| 1121 |
+
|
| 1122 |
+
.example-group .gr-button:hover {
|
| 1123 |
+
background: linear-gradient(135deg, #e2e8f0 0%, #cbd5e1 100%) !important;
|
| 1124 |
+
transform: translateY(-1px) !important;
|
| 1125 |
+
box-shadow: 0 4px 15px rgba(0,0,0,0.1) !important;
|
| 1126 |
+
}
|
| 1127 |
+
|
| 1128 |
+
/* Accordion */
|
| 1129 |
+
.gr-accordion {
|
| 1130 |
+
border-radius: 12px !important;
|
| 1131 |
+
border: 1px solid #e2e8f0 !important;
|
| 1132 |
+
background: white !important;
|
| 1133 |
+
overflow: hidden !important;
|
| 1134 |
+
}
|
| 1135 |
+
|
| 1136 |
+
.gr-accordion summary {
|
| 1137 |
+
background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%) !important;
|
| 1138 |
+
padding: 1rem 1.5rem !important;
|
| 1139 |
+
font-weight: 600 !important;
|
| 1140 |
+
color: #374151 !important;
|
| 1141 |
+
border-bottom: 1px solid #e2e8f0 !important;
|
| 1142 |
+
}
|
| 1143 |
+
|
| 1144 |
+
/* Animations */
|
| 1145 |
+
@keyframes slideUp {
|
| 1146 |
+
from { transform: translateY(20px); opacity: 0; }
|
| 1147 |
+
to { transform: translateY(0); opacity: 1; }
|
| 1148 |
+
}
|
| 1149 |
+
|
| 1150 |
+
.block, .gr-box {
|
| 1151 |
+
animation: slideUp 0.3s ease-out !important;
|
| 1152 |
+
}
|
| 1153 |
+
|
| 1154 |
+
/* Success/Error Styling */
|
| 1155 |
+
.success {
|
| 1156 |
+
background: linear-gradient(135deg, #10b981 0%, #059669 100%) !important;
|
| 1157 |
+
color: white !important;
|
| 1158 |
+
padding: 1rem !important;
|
| 1159 |
+
border-radius: 10px !important;
|
| 1160 |
+
margin: 0.5rem 0 !important;
|
| 1161 |
+
box-shadow: 0 4px 15px rgba(16, 185, 129, 0.3) !important;
|
| 1162 |
+
}
|
| 1163 |
+
|
| 1164 |
+
.error {
|
| 1165 |
+
background: linear-gradient(135deg, #ef4444 0%, #dc2626 100%) !important;
|
| 1166 |
+
color: white !important;
|
| 1167 |
+
padding: 1rem !important;
|
| 1168 |
+
border-radius: 10px !important;
|
| 1169 |
+
margin: 0.5rem 0 !important;
|
| 1170 |
+
box-shadow: 0 4px 15px rgba(239, 68, 68, 0.3) !important;
|
| 1171 |
+
}
|
| 1172 |
+
|
| 1173 |
+
/* Responsive Design */
|
| 1174 |
+
@media (max-width: 768px) {
|
| 1175 |
+
.gradio-container {
|
| 1176 |
+
padding: 1rem !important;
|
| 1177 |
+
}
|
| 1178 |
+
|
| 1179 |
+
.main-header {
|
| 1180 |
+
padding: 1.5rem !important;
|
| 1181 |
+
}
|
| 1182 |
+
|
| 1183 |
+
.chat-input-row {
|
| 1184 |
+
flex-direction: column !important;
|
| 1185 |
+
}
|
| 1186 |
+
|
| 1187 |
+
.send-button, .clear-button {
|
| 1188 |
+
width: 100% !important;
|
| 1189 |
+
margin-top: 0.5rem !important;
|
| 1190 |
+
}
|
| 1191 |
+
}
|
| 1192 |
+
|
| 1193 |
+
/* Dark mode support */
|
| 1194 |
+
@media (prefers-color-scheme: dark) {
|
| 1195 |
+
.gradio-container {
|
| 1196 |
+
background: linear-gradient(135deg, #1e293b 0%, #334155 100%) !important;
|
| 1197 |
+
}
|
| 1198 |
+
|
| 1199 |
+
.block, .gr-box {
|
| 1200 |
+
background: #334155 !important;
|
| 1201 |
+
border-color: #475569 !important;
|
| 1202 |
+
color: #f1f5f9 !important;
|
| 1203 |
+
}
|
| 1204 |
+
|
| 1205 |
+
.gr-textbox, .gr-dropdown, .gr-number {
|
| 1206 |
+
background: #475569 !important;
|
| 1207 |
+
border-color: #64748b !important;
|
| 1208 |
+
color: #f1f5f9 !important;
|
| 1209 |
+
}
|
| 1210 |
+
}
|
| 1211 |
+
"""
|
| 1212 |
+
|
| 1213 |
+
# Create modern theme
|
| 1214 |
+
theme = gr.themes.Soft(
|
| 1215 |
+
primary_hue=gr.themes.colors.violet,
|
| 1216 |
+
secondary_hue=gr.themes.colors.blue,
|
| 1217 |
+
neutral_hue=gr.themes.colors.slate,
|
| 1218 |
+
font=[gr.themes.GoogleFont("Inter"), "system-ui", "sans-serif"],
|
| 1219 |
+
font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "Consolas", "monospace"]
|
| 1220 |
+
).set(
|
| 1221 |
+
body_background_fill="linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%)",
|
| 1222 |
+
body_text_color="#1e293b",
|
| 1223 |
+
button_primary_background_fill="linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%)",
|
| 1224 |
+
button_primary_text_color="white",
|
| 1225 |
+
block_background_fill="white",
|
| 1226 |
+
block_border_color="#e2e8f0",
|
| 1227 |
+
block_border_width="1px",
|
| 1228 |
+
block_radius="12px"
|
| 1229 |
+
)
|
| 1230 |
+
|
| 1231 |
+
with gr.Blocks(
|
| 1232 |
+
title="🚚 Transportation AI | Prediction & Assistant",
|
| 1233 |
+
theme=theme,
|
| 1234 |
+
css=custom_css,
|
| 1235 |
+
analytics_enabled=False
|
| 1236 |
+
) as app:
|
| 1237 |
+
|
| 1238 |
+
# Modern Header
|
| 1239 |
+
with gr.Row(elem_classes="main-header"):
|
| 1240 |
+
gr.HTML("""
|
| 1241 |
+
<div style="background: linear-gradient(135deg, #1e40af 0%, #3b82f6 50%, #06b6d4 100%); padding: 2.5rem 1rem; border-radius: 16px; margin-bottom: 2rem; box-shadow: 0 8px 32px rgba(0,0,0,0.1);">
|
| 1242 |
+
<div style="text-align: center; color: white;">
|
| 1243 |
+
<div style="display: flex; align-items: center; justify-content: center; gap: 1rem; margin-bottom: 1rem;">
|
| 1244 |
+
<div style="background: rgba(255,255,255,0.2); padding: 0.75rem; border-radius: 12px; backdrop-filter: blur(10px);">
|
| 1245 |
+
<span style="font-size: 2rem;">🚚</span>
|
| 1246 |
+
</div>
|
| 1247 |
+
<h1 style="font-size: 2.5rem; font-weight: 800; margin: 0; text-shadow: 0 2px 4px rgba(0,0,0,0.3);">
|
| 1248 |
+
Transportation AI Platform
|
| 1249 |
+
</h1>
|
| 1250 |
+
</div>
|
| 1251 |
+
<p style="font-size: 1.1rem; opacity: 0.95; margin-bottom: 2rem; font-weight: 400;">
|
| 1252 |
+
Hệ thống dự đoán phương thức vận chuyển thông minh với AI
|
| 1253 |
+
</p>
|
| 1254 |
+
<div style="display: flex; justify-content: center; gap: 2.5rem; flex-wrap: wrap;">
|
| 1255 |
+
<div style="background: rgba(255,255,255,0.15); padding: 0.75rem 1.5rem; border-radius: 25px; backdrop-filter: blur(10px); display: flex; align-items: center; gap: 0.75rem; transition: all 0.3s ease;">
|
| 1256 |
+
<span style="font-size: 1.25rem;">🔮</span>
|
| 1257 |
+
<span style="font-weight: 600; font-size: 0.95rem;">Smart Prediction</span>
|
| 1258 |
+
</div>
|
| 1259 |
+
<div style="background: rgba(255,255,255,0.15); padding: 0.75rem 1.5rem; border-radius: 25px; backdrop-filter: blur(10px); display: flex; align-items: center; gap: 0.75rem; transition: all 0.3s ease;">
|
| 1260 |
+
<span style="font-size: 1.25rem;">🤖</span>
|
| 1261 |
+
<span style="font-weight: 600; font-size: 0.95rem;">AI Assistant</span>
|
| 1262 |
+
</div>
|
| 1263 |
+
<div style="background: rgba(255,255,255,0.15); padding: 0.75rem 1.5rem; border-radius: 25px; backdrop-filter: blur(10px); display: flex; align-items: center; gap: 0.75rem; transition: all 0.3s ease;">
|
| 1264 |
+
<span style="font-size: 1.25rem;">📊</span>
|
| 1265 |
+
<span style="font-weight: 600; font-size: 0.95rem;">Real-time Analytics</span>
|
| 1266 |
+
</div>
|
| 1267 |
+
</div>
|
| 1268 |
+
</div>
|
| 1269 |
+
</div>
|
| 1270 |
+
""")
|
| 1271 |
+
|
| 1272 |
+
# Status indicator
|
| 1273 |
+
with gr.Row():
|
| 1274 |
+
gr.HTML("""
|
| 1275 |
+
<div style="text-align: center; padding: 1rem; background: linear-gradient(135deg, #10b981 0%, #059669 100%); color: white; border-radius: 8px; margin: 1rem 0;">
|
| 1276 |
+
<strong>✅ API Server Status:</strong> Transportation Prediction API v2.0 - Ready
|
| 1277 |
+
</div>
|
| 1278 |
+
""")
|
| 1279 |
+
|
| 1280 |
+
# Main tabs with improved styling
|
| 1281 |
+
with gr.Tabs(elem_classes="main-tabs") as tabs:
|
| 1282 |
+
with gr.Tab("🔮 Smart Prediction", elem_id="prediction-tab"):
|
| 1283 |
+
create_prediction_tab()
|
| 1284 |
+
|
| 1285 |
+
with gr.Tab("🤖 AI Assistant", elem_id="chat-tab"):
|
| 1286 |
+
create_chat_tab()
|
| 1287 |
+
|
| 1288 |
+
# Enhanced Footer
|
| 1289 |
+
with gr.Row():
|
| 1290 |
+
gr.Markdown("""
|
| 1291 |
+
<div style="text-align: center; padding: 2rem; background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%); border-radius: 12px; margin-top: 2rem;">
|
| 1292 |
+
<h3 style="color: #475569; margin-bottom: 1rem;">💡 Hướng dẫn sử dụng</h3>
|
| 1293 |
+
<div style="display: grid; grid-template-columns: 1fr 1fr; gap: 2rem; max-width: 800px; margin: 0 auto;">
|
| 1294 |
+
<div style="text-align: left;">
|
| 1295 |
+
<h4 style="color: #7c3aed; margin-bottom: 0.5rem;">🔮 Smart Prediction</h4>
|
| 1296 |
+
<p style="color: #64748b; font-size: 0.9rem;">Nhập thông tin đơn hàng để AI dự đoán phương thức vận chuyển tối ưu nhất</p>
|
| 1297 |
+
</div>
|
| 1298 |
+
<div style="text-align: left;">
|
| 1299 |
+
<h4 style="color: #7c3aed; margin-bottom: 0.5rem;">🤖 AI Assistant</h4>
|
| 1300 |
+
<p style="color: #64748b; font-size: 0.9rem;">Chat với AI để được tư vấn chuyên sâu về logistics và vận chuyển</p>
|
| 1301 |
+
</div>
|
| 1302 |
+
</div>
|
| 1303 |
+
<div style="margin-top: 1.5rem; padding-top: 1rem; border-top: 1px solid #cbd5e1;">
|
| 1304 |
+
<p style="color: #64748b; font-size: 0.85rem;">
|
| 1305 |
+
<strong>🔧 API Endpoint:</strong> <code style="background: #f1f5f9; padding: 0.2rem 0.5rem; border-radius: 4px;">{}</code>
|
| 1306 |
+
</p>
|
| 1307 |
+
</div>
|
| 1308 |
+
</div>
|
| 1309 |
+
""".format(API_BASE_URL))
|
| 1310 |
+
|
| 1311 |
+
return app
|
| 1312 |
+
|
| 1313 |
+
def main():
|
| 1314 |
+
"""Main function to run the application"""
|
| 1315 |
+
|
| 1316 |
+
# Check if running on HF Spaces
|
| 1317 |
+
is_hf_spaces = os.getenv("SPACE_ID") is not None
|
| 1318 |
+
|
| 1319 |
+
# Create and launch app
|
| 1320 |
+
app = create_app()
|
| 1321 |
+
|
| 1322 |
+
# Always use port 7860 for HF Spaces compatibility
|
| 1323 |
+
print("🚀 Starting Gradio app on port 7860...")
|
| 1324 |
+
app.launch(
|
| 1325 |
+
server_name="0.0.0.0",
|
| 1326 |
+
server_port=7860,
|
| 1327 |
+
share=False,
|
| 1328 |
+
show_error=True
|
| 1329 |
+
)
|
| 1330 |
+
|
| 1331 |
+
if __name__ == "__main__":
|
| 1332 |
+
main()
|
source/conversation_cache.json
ADDED
|
@@ -0,0 +1,909 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"role": "user",
|
| 4 |
+
"content": "Dự đoán phương thức vận chuyển cho project ABC",
|
| 5 |
+
"timestamp": "2025-08-14T14:02:40.591810"
|
| 6 |
+
},
|
| 7 |
+
{
|
| 8 |
+
"role": "assistant",
|
| 9 |
+
"content": "Để dự đoán chính xác phương thức vận chuyển cho dự án ABC, tôi cần thêm thông tin về quốc gia đích, giá trị hàng hóa (USD) và nhà cung cấp. Vui lòng cung cấp thêm thông tin.\n",
|
| 10 |
+
"timestamp": "2025-08-14T14:02:40.591810",
|
| 11 |
+
"function_calls": [],
|
| 12 |
+
"has_functions": false
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"role": "user",
|
| 16 |
+
"content": "Previous conversations for context:\n1. User: Dự đoán phương thức vận chuyển cho project ABC → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project ABC",
|
| 17 |
+
"timestamp": "2025-08-14T14:02:56.483460"
|
| 18 |
+
},
|
| 19 |
+
{
|
| 20 |
+
"role": "assistant",
|
| 21 |
+
"content": "Để dự đoán chính xác phương thức vận chuyển cho dự án ABC, tôi cần thêm thông tin về: quốc gia đích, giá trị hàng hóa (USD), và nhà cung cấp. Vui lòng cung cấp các thông tin này.\n",
|
| 22 |
+
"timestamp": "2025-08-14T14:02:56.483460",
|
| 23 |
+
"function_calls": [],
|
| 24 |
+
"has_functions": false
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"role": "user",
|
| 28 |
+
"content": "Previous conversations for context:\n1. User: Dự đoán phương thức vận chuyển cho project ABC → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n2. User: Previous conversations for context:\n1. User: Dự đo... → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n\nCurrent conversation:\nhello",
|
| 29 |
+
"timestamp": "2025-08-14T14:04:02.194666"
|
| 30 |
+
},
|
| 31 |
+
{
|
| 32 |
+
"role": "assistant",
|
| 33 |
+
"content": "Hello! How can I help you today?\n",
|
| 34 |
+
"timestamp": "2025-08-14T14:04:02.194666",
|
| 35 |
+
"function_calls": [],
|
| 36 |
+
"has_functions": false
|
| 37 |
+
},
|
| 38 |
+
{
|
| 39 |
+
"role": "user",
|
| 40 |
+
"content": "Previous conversations for context:\n1. User: Dự đoán phương thức vận chuyển cho project ABC → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n2. User: Previous conversations for context:\n1. User: Dự đo... → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n3. User: Previous conversations for context:\n1. User: Dự đo... → AI: Hello! How can I help you today?\n\n\nCurrent conversation:\nhello",
|
| 41 |
+
"timestamp": "2025-08-14T14:04:32.228672"
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
"role": "assistant",
|
| 45 |
+
"content": "Hello! How can I help you today?\n",
|
| 46 |
+
"timestamp": "2025-08-14T14:04:32.228672",
|
| 47 |
+
"function_calls": [],
|
| 48 |
+
"has_functions": false
|
| 49 |
+
},
|
| 50 |
+
{
|
| 51 |
+
"role": "user",
|
| 52 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Dự đo... → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n2. User: Previous conversations for context:\n1. User: Dự đo... → AI: Hello! How can I help you today?\n\n3. User: Previous conversations for context:\n1. User: Dự đo... → AI: Hello! How can I help you today?\n\n\nCurrent conversation:\nhello",
|
| 53 |
+
"timestamp": "2025-08-14T14:05:49.988959"
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
"role": "assistant",
|
| 57 |
+
"content": "Hello! How can I help you today?\n",
|
| 58 |
+
"timestamp": "2025-08-14T14:05:49.988959",
|
| 59 |
+
"function_calls": [],
|
| 60 |
+
"has_functions": false
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"role": "user",
|
| 64 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Dự đo... → AI: Hello! How can I help you today?\n\n2. User: Previous conversations for context:\n1. User: Dự đo... → AI: Hello! How can I help you today?\n\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n\nCurrent conversation:\nhello",
|
| 65 |
+
"timestamp": "2025-08-14T14:06:41.049632"
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"role": "assistant",
|
| 69 |
+
"content": "Hello! How can I help you today?\n",
|
| 70 |
+
"timestamp": "2025-08-14T14:06:41.049632",
|
| 71 |
+
"function_calls": [],
|
| 72 |
+
"has_functions": false
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"role": "user",
|
| 76 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Dự đo... → AI: Hello! How can I help you today?\n\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n\nCurrent conversation:\nhello",
|
| 77 |
+
"timestamp": "2025-08-14T14:07:34.064344"
|
| 78 |
+
},
|
| 79 |
+
{
|
| 80 |
+
"role": "assistant",
|
| 81 |
+
"content": "Hello! How can I help you today?\n",
|
| 82 |
+
"timestamp": "2025-08-14T14:07:34.064344",
|
| 83 |
+
"function_calls": [],
|
| 84 |
+
"has_functions": false
|
| 85 |
+
},
|
| 86 |
+
{
|
| 87 |
+
"role": "user",
|
| 88 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n\nCurrent conversation:\nhello",
|
| 89 |
+
"timestamp": "2025-08-14T14:08:43.078560"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"role": "assistant",
|
| 93 |
+
"content": "Hello! How can I help you today?\n",
|
| 94 |
+
"timestamp": "2025-08-14T14:08:43.078560",
|
| 95 |
+
"function_calls": [],
|
| 96 |
+
"has_functions": false
|
| 97 |
+
},
|
| 98 |
+
{
|
| 99 |
+
"role": "user",
|
| 100 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n\nCurrent conversation:\nxin chào, có options gì?",
|
| 101 |
+
"timestamp": "2025-08-14T14:21:36.181245"
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"role": "assistant",
|
| 105 |
+
"content": "The available transportation options are: Air, Air Charter, Ocean, and Truck. Do you have a specific shipment in mind? Knowing the destination country, value of the goods, project code, and vendor would help me recommend the most suitable option for you.",
|
| 106 |
+
"timestamp": "2025-08-14T14:21:36.181245",
|
| 107 |
+
"function_calls": [
|
| 108 |
+
{
|
| 109 |
+
"function_name": "get_transportation_options",
|
| 110 |
+
"arguments": {},
|
| 111 |
+
"result": {
|
| 112 |
+
"shipment_modes": [
|
| 113 |
+
"Air",
|
| 114 |
+
"Air Charter",
|
| 115 |
+
"Ocean",
|
| 116 |
+
"Truck"
|
| 117 |
+
],
|
| 118 |
+
"sample_vendors": [
|
| 119 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 120 |
+
"ABBOTT LOGISTICS B.V.",
|
| 121 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 122 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 123 |
+
"ACCESS BIO, INC.",
|
| 124 |
+
"ACOUNS NIGERIA LTD",
|
| 125 |
+
"ACTION MEDEOR E.V.",
|
| 126 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 127 |
+
"AMSTELFARMA B.V.",
|
| 128 |
+
"ASPEN PHARMACARE"
|
| 129 |
+
],
|
| 130 |
+
"sample_countries": [
|
| 131 |
+
"Afghanistan",
|
| 132 |
+
"Angola",
|
| 133 |
+
"Belize",
|
| 134 |
+
"Benin",
|
| 135 |
+
"Botswana",
|
| 136 |
+
"Burkina Faso",
|
| 137 |
+
"Burundi",
|
| 138 |
+
"Cameroon",
|
| 139 |
+
"Congo, DRC",
|
| 140 |
+
"Côte d'Ivoire"
|
| 141 |
+
],
|
| 142 |
+
"sample_projects": [
|
| 143 |
+
"100-BJ-T30",
|
| 144 |
+
"100-BW-T01",
|
| 145 |
+
"100-BZ-T30",
|
| 146 |
+
"100-CI-T01",
|
| 147 |
+
"100-DO-T30",
|
| 148 |
+
"100-GH-T01",
|
| 149 |
+
"100-GH-T30",
|
| 150 |
+
"100-GN-T30",
|
| 151 |
+
"100-HT-T01",
|
| 152 |
+
"100-KZ-T01"
|
| 153 |
+
]
|
| 154 |
+
}
|
| 155 |
+
}
|
| 156 |
+
],
|
| 157 |
+
"has_functions": true
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"role": "user",
|
| 161 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n3. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n\nCurrent conversation:\nxin chào, có options gì?",
|
| 162 |
+
"timestamp": "2025-08-14T14:21:38.540585"
|
| 163 |
+
},
|
| 164 |
+
{
|
| 165 |
+
"role": "assistant",
|
| 166 |
+
"content": "The available transportation options are: Air, Air Charter, Ocean, and Truck.\n",
|
| 167 |
+
"timestamp": "2025-08-14T14:21:38.540585",
|
| 168 |
+
"function_calls": [
|
| 169 |
+
{
|
| 170 |
+
"function_name": "get_transportation_options",
|
| 171 |
+
"arguments": {},
|
| 172 |
+
"result": {
|
| 173 |
+
"shipment_modes": [
|
| 174 |
+
"Air",
|
| 175 |
+
"Air Charter",
|
| 176 |
+
"Ocean",
|
| 177 |
+
"Truck"
|
| 178 |
+
],
|
| 179 |
+
"sample_vendors": [
|
| 180 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 181 |
+
"ABBOTT LOGISTICS B.V.",
|
| 182 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 183 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 184 |
+
"ACCESS BIO, INC.",
|
| 185 |
+
"ACOUNS NIGERIA LTD",
|
| 186 |
+
"ACTION MEDEOR E.V.",
|
| 187 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 188 |
+
"AMSTELFARMA B.V.",
|
| 189 |
+
"ASPEN PHARMACARE"
|
| 190 |
+
],
|
| 191 |
+
"sample_countries": [
|
| 192 |
+
"Afghanistan",
|
| 193 |
+
"Angola",
|
| 194 |
+
"Belize",
|
| 195 |
+
"Benin",
|
| 196 |
+
"Botswana",
|
| 197 |
+
"Burkina Faso",
|
| 198 |
+
"Burundi",
|
| 199 |
+
"Cameroon",
|
| 200 |
+
"Congo, DRC",
|
| 201 |
+
"Côte d'Ivoire"
|
| 202 |
+
],
|
| 203 |
+
"sample_projects": [
|
| 204 |
+
"100-BJ-T30",
|
| 205 |
+
"100-BW-T01",
|
| 206 |
+
"100-BZ-T30",
|
| 207 |
+
"100-CI-T01",
|
| 208 |
+
"100-DO-T30",
|
| 209 |
+
"100-GH-T01",
|
| 210 |
+
"100-GH-T30",
|
| 211 |
+
"100-GN-T30",
|
| 212 |
+
"100-HT-T01",
|
| 213 |
+
"100-KZ-T01"
|
| 214 |
+
]
|
| 215 |
+
}
|
| 216 |
+
}
|
| 217 |
+
],
|
| 218 |
+
"has_functions": true
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"role": "user",
|
| 222 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Hello! How can I help you today?\n\n2. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n\nCurrent conversation:\nxin chào, có options gì?",
|
| 223 |
+
"timestamp": "2025-08-14T14:22:03.338829"
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"role": "assistant",
|
| 227 |
+
"content": "The available transportation options are: Air, Air Charter, Ocean, and Truck. Do you have a specific shipment in mind? Knowing the destination country, value of the goods, project code, and vendor would help me recommend the most suitable option for you.",
|
| 228 |
+
"timestamp": "2025-08-14T14:22:03.338829",
|
| 229 |
+
"function_calls": [
|
| 230 |
+
{
|
| 231 |
+
"function_name": "get_transportation_options",
|
| 232 |
+
"arguments": {},
|
| 233 |
+
"result": {
|
| 234 |
+
"shipment_modes": [
|
| 235 |
+
"Air",
|
| 236 |
+
"Air Charter",
|
| 237 |
+
"Ocean",
|
| 238 |
+
"Truck"
|
| 239 |
+
],
|
| 240 |
+
"sample_vendors": [
|
| 241 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 242 |
+
"ABBOTT LOGISTICS B.V.",
|
| 243 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 244 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 245 |
+
"ACCESS BIO, INC.",
|
| 246 |
+
"ACOUNS NIGERIA LTD",
|
| 247 |
+
"ACTION MEDEOR E.V.",
|
| 248 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 249 |
+
"AMSTELFARMA B.V.",
|
| 250 |
+
"ASPEN PHARMACARE"
|
| 251 |
+
],
|
| 252 |
+
"sample_countries": [
|
| 253 |
+
"Afghanistan",
|
| 254 |
+
"Angola",
|
| 255 |
+
"Belize",
|
| 256 |
+
"Benin",
|
| 257 |
+
"Botswana",
|
| 258 |
+
"Burkina Faso",
|
| 259 |
+
"Burundi",
|
| 260 |
+
"Cameroon",
|
| 261 |
+
"Congo, DRC",
|
| 262 |
+
"Côte d'Ivoire"
|
| 263 |
+
],
|
| 264 |
+
"sample_projects": [
|
| 265 |
+
"100-BJ-T30",
|
| 266 |
+
"100-BW-T01",
|
| 267 |
+
"100-BZ-T30",
|
| 268 |
+
"100-CI-T01",
|
| 269 |
+
"100-DO-T30",
|
| 270 |
+
"100-GH-T01",
|
| 271 |
+
"100-GH-T30",
|
| 272 |
+
"100-GN-T30",
|
| 273 |
+
"100-HT-T01",
|
| 274 |
+
"100-KZ-T01"
|
| 275 |
+
]
|
| 276 |
+
}
|
| 277 |
+
}
|
| 278 |
+
],
|
| 279 |
+
"has_functions": true
|
| 280 |
+
},
|
| 281 |
+
{
|
| 282 |
+
"role": "user",
|
| 283 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n\nCurrent conversation:\nxin chào, có options gì?",
|
| 284 |
+
"timestamp": "2025-08-14T14:22:05.971028"
|
| 285 |
+
},
|
| 286 |
+
{
|
| 287 |
+
"role": "assistant",
|
| 288 |
+
"content": "The available transportation options are: Air, Air Charter, Ocean, and Truck. I can also provide you with a list of sample countries, projects, and vendors if that would be helpful.\n",
|
| 289 |
+
"timestamp": "2025-08-14T14:22:05.971028",
|
| 290 |
+
"function_calls": [
|
| 291 |
+
{
|
| 292 |
+
"function_name": "get_transportation_options",
|
| 293 |
+
"arguments": {},
|
| 294 |
+
"result": {
|
| 295 |
+
"shipment_modes": [
|
| 296 |
+
"Air",
|
| 297 |
+
"Air Charter",
|
| 298 |
+
"Ocean",
|
| 299 |
+
"Truck"
|
| 300 |
+
],
|
| 301 |
+
"sample_vendors": [
|
| 302 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 303 |
+
"ABBOTT LOGISTICS B.V.",
|
| 304 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 305 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 306 |
+
"ACCESS BIO, INC.",
|
| 307 |
+
"ACOUNS NIGERIA LTD",
|
| 308 |
+
"ACTION MEDEOR E.V.",
|
| 309 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 310 |
+
"AMSTELFARMA B.V.",
|
| 311 |
+
"ASPEN PHARMACARE"
|
| 312 |
+
],
|
| 313 |
+
"sample_countries": [
|
| 314 |
+
"Afghanistan",
|
| 315 |
+
"Angola",
|
| 316 |
+
"Belize",
|
| 317 |
+
"Benin",
|
| 318 |
+
"Botswana",
|
| 319 |
+
"Burkina Faso",
|
| 320 |
+
"Burundi",
|
| 321 |
+
"Cameroon",
|
| 322 |
+
"Congo, DRC",
|
| 323 |
+
"Côte d'Ivoire"
|
| 324 |
+
],
|
| 325 |
+
"sample_projects": [
|
| 326 |
+
"100-BJ-T30",
|
| 327 |
+
"100-BW-T01",
|
| 328 |
+
"100-BZ-T30",
|
| 329 |
+
"100-CI-T01",
|
| 330 |
+
"100-DO-T30",
|
| 331 |
+
"100-GH-T01",
|
| 332 |
+
"100-GH-T30",
|
| 333 |
+
"100-GN-T30",
|
| 334 |
+
"100-HT-T01",
|
| 335 |
+
"100-KZ-T01"
|
| 336 |
+
]
|
| 337 |
+
}
|
| 338 |
+
}
|
| 339 |
+
],
|
| 340 |
+
"has_functions": true
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"role": "user",
|
| 344 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n\nCurrent conversation:\nxin chào, có options gì?",
|
| 345 |
+
"timestamp": "2025-08-14T14:22:49.011223"
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"role": "assistant",
|
| 349 |
+
"content": "The available transportation options are: Air, Air Charter, Ocean, and Truck.",
|
| 350 |
+
"timestamp": "2025-08-14T14:22:49.011223",
|
| 351 |
+
"function_calls": [
|
| 352 |
+
{
|
| 353 |
+
"function_name": "get_transportation_options",
|
| 354 |
+
"arguments": {},
|
| 355 |
+
"result": {
|
| 356 |
+
"shipment_modes": [
|
| 357 |
+
"Air",
|
| 358 |
+
"Air Charter",
|
| 359 |
+
"Ocean",
|
| 360 |
+
"Truck"
|
| 361 |
+
],
|
| 362 |
+
"sample_vendors": [
|
| 363 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 364 |
+
"ABBOTT LOGISTICS B.V.",
|
| 365 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 366 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 367 |
+
"ACCESS BIO, INC.",
|
| 368 |
+
"ACOUNS NIGERIA LTD",
|
| 369 |
+
"ACTION MEDEOR E.V.",
|
| 370 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 371 |
+
"AMSTELFARMA B.V.",
|
| 372 |
+
"ASPEN PHARMACARE"
|
| 373 |
+
],
|
| 374 |
+
"sample_countries": [
|
| 375 |
+
"Afghanistan",
|
| 376 |
+
"Angola",
|
| 377 |
+
"Belize",
|
| 378 |
+
"Benin",
|
| 379 |
+
"Botswana",
|
| 380 |
+
"Burkina Faso",
|
| 381 |
+
"Burundi",
|
| 382 |
+
"Cameroon",
|
| 383 |
+
"Congo, DRC",
|
| 384 |
+
"Côte d'Ivoire"
|
| 385 |
+
],
|
| 386 |
+
"sample_projects": [
|
| 387 |
+
"100-BJ-T30",
|
| 388 |
+
"100-BW-T01",
|
| 389 |
+
"100-BZ-T30",
|
| 390 |
+
"100-CI-T01",
|
| 391 |
+
"100-DO-T30",
|
| 392 |
+
"100-GH-T01",
|
| 393 |
+
"100-GH-T30",
|
| 394 |
+
"100-GN-T30",
|
| 395 |
+
"100-HT-T01",
|
| 396 |
+
"100-KZ-T01"
|
| 397 |
+
]
|
| 398 |
+
}
|
| 399 |
+
}
|
| 400 |
+
],
|
| 401 |
+
"has_functions": true
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"role": "user",
|
| 405 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n\nCurrent conversation:\nxin chào, có options gì?",
|
| 406 |
+
"timestamp": "2025-08-14T14:22:51.580052"
|
| 407 |
+
},
|
| 408 |
+
{
|
| 409 |
+
"role": "assistant",
|
| 410 |
+
"content": "The available transportation options are: Air, Air Charter, Ocean, and Truck. Do you have a specific country, project, or vendor in mind? This will help me narrow down the best options for you.\n",
|
| 411 |
+
"timestamp": "2025-08-14T14:22:51.580052",
|
| 412 |
+
"function_calls": [
|
| 413 |
+
{
|
| 414 |
+
"function_name": "get_transportation_options",
|
| 415 |
+
"arguments": {},
|
| 416 |
+
"result": {
|
| 417 |
+
"shipment_modes": [
|
| 418 |
+
"Air",
|
| 419 |
+
"Air Charter",
|
| 420 |
+
"Ocean",
|
| 421 |
+
"Truck"
|
| 422 |
+
],
|
| 423 |
+
"sample_vendors": [
|
| 424 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 425 |
+
"ABBOTT LOGISTICS B.V.",
|
| 426 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 427 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 428 |
+
"ACCESS BIO, INC.",
|
| 429 |
+
"ACOUNS NIGERIA LTD",
|
| 430 |
+
"ACTION MEDEOR E.V.",
|
| 431 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 432 |
+
"AMSTELFARMA B.V.",
|
| 433 |
+
"ASPEN PHARMACARE"
|
| 434 |
+
],
|
| 435 |
+
"sample_countries": [
|
| 436 |
+
"Afghanistan",
|
| 437 |
+
"Angola",
|
| 438 |
+
"Belize",
|
| 439 |
+
"Benin",
|
| 440 |
+
"Botswana",
|
| 441 |
+
"Burkina Faso",
|
| 442 |
+
"Burundi",
|
| 443 |
+
"Cameroon",
|
| 444 |
+
"Congo, DRC",
|
| 445 |
+
"Côte d'Ivoire"
|
| 446 |
+
],
|
| 447 |
+
"sample_projects": [
|
| 448 |
+
"100-BJ-T30",
|
| 449 |
+
"100-BW-T01",
|
| 450 |
+
"100-BZ-T30",
|
| 451 |
+
"100-CI-T01",
|
| 452 |
+
"100-DO-T30",
|
| 453 |
+
"100-GH-T01",
|
| 454 |
+
"100-GH-T30",
|
| 455 |
+
"100-GN-T30",
|
| 456 |
+
"100-HT-T01",
|
| 457 |
+
"100-KZ-T01"
|
| 458 |
+
]
|
| 459 |
+
}
|
| 460 |
+
}
|
| 461 |
+
],
|
| 462 |
+
"has_functions": true
|
| 463 |
+
},
|
| 464 |
+
{
|
| 465 |
+
"role": "user",
|
| 466 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n\nCurrent conversation:\ncó options gì?",
|
| 467 |
+
"timestamp": "2025-08-14T14:24:44.758820"
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"role": "assistant",
|
| 471 |
+
"content": "The available transportation options are: Air, Air Charter, Ocean, and Truck.\n",
|
| 472 |
+
"timestamp": "2025-08-14T14:24:44.758820",
|
| 473 |
+
"function_calls": [
|
| 474 |
+
{
|
| 475 |
+
"function_name": "get_transportation_options",
|
| 476 |
+
"arguments": {},
|
| 477 |
+
"result": {
|
| 478 |
+
"shipment_modes": [
|
| 479 |
+
"Air",
|
| 480 |
+
"Air Charter",
|
| 481 |
+
"Ocean",
|
| 482 |
+
"Truck"
|
| 483 |
+
],
|
| 484 |
+
"sample_vendors": [
|
| 485 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 486 |
+
"ABBOTT LOGISTICS B.V.",
|
| 487 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 488 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 489 |
+
"ACCESS BIO, INC.",
|
| 490 |
+
"ACOUNS NIGERIA LTD",
|
| 491 |
+
"ACTION MEDEOR E.V.",
|
| 492 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 493 |
+
"AMSTELFARMA B.V.",
|
| 494 |
+
"ASPEN PHARMACARE"
|
| 495 |
+
],
|
| 496 |
+
"sample_countries": [
|
| 497 |
+
"Afghanistan",
|
| 498 |
+
"Angola",
|
| 499 |
+
"Belize",
|
| 500 |
+
"Benin",
|
| 501 |
+
"Botswana",
|
| 502 |
+
"Burkina Faso",
|
| 503 |
+
"Burundi",
|
| 504 |
+
"Cameroon",
|
| 505 |
+
"Congo, DRC",
|
| 506 |
+
"Côte d'Ivoire"
|
| 507 |
+
],
|
| 508 |
+
"sample_projects": [
|
| 509 |
+
"100-BJ-T30",
|
| 510 |
+
"100-BW-T01",
|
| 511 |
+
"100-BZ-T30",
|
| 512 |
+
"100-CI-T01",
|
| 513 |
+
"100-DO-T30",
|
| 514 |
+
"100-GH-T01",
|
| 515 |
+
"100-GH-T30",
|
| 516 |
+
"100-GN-T30",
|
| 517 |
+
"100-HT-T01",
|
| 518 |
+
"100-KZ-T01"
|
| 519 |
+
]
|
| 520 |
+
}
|
| 521 |
+
}
|
| 522 |
+
],
|
| 523 |
+
"has_functions": true
|
| 524 |
+
},
|
| 525 |
+
{
|
| 526 |
+
"role": "user",
|
| 527 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n\nCurrent conversation:\ncó options gì?",
|
| 528 |
+
"timestamp": "2025-08-14T14:24:46.699229"
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"role": "assistant",
|
| 532 |
+
"content": "The available transportation options are: Air, Air Charter, Ocean, and Truck.",
|
| 533 |
+
"timestamp": "2025-08-14T14:24:46.699229",
|
| 534 |
+
"function_calls": [
|
| 535 |
+
{
|
| 536 |
+
"function_name": "get_transportation_options",
|
| 537 |
+
"arguments": {},
|
| 538 |
+
"result": {
|
| 539 |
+
"shipment_modes": [
|
| 540 |
+
"Air",
|
| 541 |
+
"Air Charter",
|
| 542 |
+
"Ocean",
|
| 543 |
+
"Truck"
|
| 544 |
+
],
|
| 545 |
+
"sample_vendors": [
|
| 546 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 547 |
+
"ABBOTT LOGISTICS B.V.",
|
| 548 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 549 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 550 |
+
"ACCESS BIO, INC.",
|
| 551 |
+
"ACOUNS NIGERIA LTD",
|
| 552 |
+
"ACTION MEDEOR E.V.",
|
| 553 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 554 |
+
"AMSTELFARMA B.V.",
|
| 555 |
+
"ASPEN PHARMACARE"
|
| 556 |
+
],
|
| 557 |
+
"sample_countries": [
|
| 558 |
+
"Afghanistan",
|
| 559 |
+
"Angola",
|
| 560 |
+
"Belize",
|
| 561 |
+
"Benin",
|
| 562 |
+
"Botswana",
|
| 563 |
+
"Burkina Faso",
|
| 564 |
+
"Burundi",
|
| 565 |
+
"Cameroon",
|
| 566 |
+
"Congo, DRC",
|
| 567 |
+
"Côte d'Ivoire"
|
| 568 |
+
],
|
| 569 |
+
"sample_projects": [
|
| 570 |
+
"100-BJ-T30",
|
| 571 |
+
"100-BW-T01",
|
| 572 |
+
"100-BZ-T30",
|
| 573 |
+
"100-CI-T01",
|
| 574 |
+
"100-DO-T30",
|
| 575 |
+
"100-GH-T01",
|
| 576 |
+
"100-GH-T30",
|
| 577 |
+
"100-GN-T30",
|
| 578 |
+
"100-HT-T01",
|
| 579 |
+
"100-KZ-T01"
|
| 580 |
+
]
|
| 581 |
+
}
|
| 582 |
+
},
|
| 583 |
+
{
|
| 584 |
+
"function_name": "get_transportation_options",
|
| 585 |
+
"arguments": {},
|
| 586 |
+
"result": {
|
| 587 |
+
"shipment_modes": [
|
| 588 |
+
"Air",
|
| 589 |
+
"Air Charter",
|
| 590 |
+
"Ocean",
|
| 591 |
+
"Truck"
|
| 592 |
+
],
|
| 593 |
+
"sample_vendors": [
|
| 594 |
+
"ABBOTT LABORATORIES (PUERTO RICO)",
|
| 595 |
+
"ABBOTT LOGISTICS B.V.",
|
| 596 |
+
"ABBVIE LOGISTICS (FORMERLY ABBOTT LOGISTICS BV)",
|
| 597 |
+
"ABBVIE, SRL (FORMALLY ABBOTT LABORATORIES INTERNATIONAL CO.)",
|
| 598 |
+
"ACCESS BIO, INC.",
|
| 599 |
+
"ACOUNS NIGERIA LTD",
|
| 600 |
+
"ACTION MEDEOR E.V.",
|
| 601 |
+
"AHN (PTY) LTD (AKA UCB (S.A.)",
|
| 602 |
+
"AMSTELFARMA B.V.",
|
| 603 |
+
"ASPEN PHARMACARE"
|
| 604 |
+
],
|
| 605 |
+
"sample_countries": [
|
| 606 |
+
"Afghanistan",
|
| 607 |
+
"Angola",
|
| 608 |
+
"Belize",
|
| 609 |
+
"Benin",
|
| 610 |
+
"Botswana",
|
| 611 |
+
"Burkina Faso",
|
| 612 |
+
"Burundi",
|
| 613 |
+
"Cameroon",
|
| 614 |
+
"Congo, DRC",
|
| 615 |
+
"Côte d'Ivoire"
|
| 616 |
+
],
|
| 617 |
+
"sample_projects": [
|
| 618 |
+
"100-BJ-T30",
|
| 619 |
+
"100-BW-T01",
|
| 620 |
+
"100-BZ-T30",
|
| 621 |
+
"100-CI-T01",
|
| 622 |
+
"100-DO-T30",
|
| 623 |
+
"100-GH-T01",
|
| 624 |
+
"100-GH-T30",
|
| 625 |
+
"100-GN-T30",
|
| 626 |
+
"100-HT-T01",
|
| 627 |
+
"100-KZ-T01"
|
| 628 |
+
]
|
| 629 |
+
}
|
| 630 |
+
}
|
| 631 |
+
],
|
| 632 |
+
"has_functions": true
|
| 633 |
+
},
|
| 634 |
+
{
|
| 635 |
+
"role": "user",
|
| 636 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project ABC",
|
| 637 |
+
"timestamp": "2025-08-14T14:25:07.830309"
|
| 638 |
+
},
|
| 639 |
+
{
|
| 640 |
+
"role": "assistant",
|
| 641 |
+
"content": "Để dự đoán chính xác phương thức vận chuyển cho dự án ABC, tôi cần thêm thông tin về: quốc gia nhận hàng, giá trị hàng hóa (USD), và nhà cung cấp. Vui lòng cung cấp các thông tin này.\n",
|
| 642 |
+
"timestamp": "2025-08-14T14:25:07.830309",
|
| 643 |
+
"function_calls": [],
|
| 644 |
+
"has_functions": false
|
| 645 |
+
},
|
| 646 |
+
{
|
| 647 |
+
"role": "user",
|
| 648 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project ABC",
|
| 649 |
+
"timestamp": "2025-08-14T14:25:08.919754"
|
| 650 |
+
},
|
| 651 |
+
{
|
| 652 |
+
"role": "assistant",
|
| 653 |
+
"content": "Để dự đoán chính xác, cho tôi biết: quốc gia đến, giá trị hàng hóa (USD), nhà cung cấp?",
|
| 654 |
+
"timestamp": "2025-08-14T14:25:08.919754",
|
| 655 |
+
"function_calls": [],
|
| 656 |
+
"has_functions": false
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"role": "user",
|
| 660 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: The available transportation options are: Air, Air...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Để dự đoán chính xác, cho tôi biết: quốc gia đến, ...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project ABC, giá trị hàng hóa là 500 đô, quốc gia đến là Mỹ, nhà cung cấp Aventador",
|
| 661 |
+
"timestamp": "2025-08-14T14:27:18.791250"
|
| 662 |
+
},
|
| 663 |
+
{
|
| 664 |
+
"role": "assistant",
|
| 665 |
+
"content": "Phương thức vận chuyển được dự đoán là **Air (vận chuyển hàng không)** với độ tin cậy 82.14%. Chi phí vận chuyển ước tính là 50 USD và trọng lượng ước tính là 10 kg.\n\nCác phương thức thay thế bao gồm:\n* Truck (vận tải đường bộ): 17.84%\n* Air Charter (thuê máy bay riêng): 0.01%\n* Ocean (vận tải đường biển): 0.01%\n",
|
| 666 |
+
"timestamp": "2025-08-14T14:27:18.791250",
|
| 667 |
+
"function_calls": [
|
| 668 |
+
{
|
| 669 |
+
"function_name": "predict_transportation_mode",
|
| 670 |
+
"arguments": {
|
| 671 |
+
"project_code": "ABC",
|
| 672 |
+
"pack_price": 500.0,
|
| 673 |
+
"vendor": "Aventador",
|
| 674 |
+
"country": "Mỹ"
|
| 675 |
+
},
|
| 676 |
+
"result": {
|
| 677 |
+
"predicted_shipment_mode": "Air",
|
| 678 |
+
"confidence_score": 0.8213831782341003,
|
| 679 |
+
"alternative_modes": [
|
| 680 |
+
{
|
| 681 |
+
"mode": "Truck",
|
| 682 |
+
"probability": 0.17840932309627533
|
| 683 |
+
},
|
| 684 |
+
{
|
| 685 |
+
"mode": "Air Charter",
|
| 686 |
+
"probability": 0.00013026525266468525
|
| 687 |
+
},
|
| 688 |
+
{
|
| 689 |
+
"mode": "Ocean",
|
| 690 |
+
"probability": 7.720861322013661e-05
|
| 691 |
+
}
|
| 692 |
+
],
|
| 693 |
+
"estimated_weight_kg": 10.0,
|
| 694 |
+
"estimated_freight_cost_usd": 50.0,
|
| 695 |
+
"encoded_features": {
|
| 696 |
+
"Project_Code": 0,
|
| 697 |
+
"Country": 0,
|
| 698 |
+
"Vendor": 0
|
| 699 |
+
},
|
| 700 |
+
"processing_notes": []
|
| 701 |
+
}
|
| 702 |
+
}
|
| 703 |
+
],
|
| 704 |
+
"has_functions": true
|
| 705 |
+
},
|
| 706 |
+
{
|
| 707 |
+
"role": "user",
|
| 708 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Để dự đoán chính xác phương thức vận chuyển cho dự...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Để dự đoán chính xác, cho tôi biết: quốc gia đến, ...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Phương thức vận chuyển được dự đoán là **Air (vận ...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project ABC, giá trị hàng hóa là 500 đô, quốc gia đến là Mỹ, nhà cung cấp Aventador",
|
| 709 |
+
"timestamp": "2025-08-14T14:27:22.525437"
|
| 710 |
+
},
|
| 711 |
+
{
|
| 712 |
+
"role": "assistant",
|
| 713 |
+
"content": "Phương thức vận chuyển được dự đoán là **Air (vận chuyển hàng không)**.\n\nChi phí vận chuyển ước tính: 50 USD.\nTrọng lượng ước tính: 10 kg.\n\nCác phương thức thay thế khác bao gồm:\n* Truck (vận chuyển đường bộ): xác suất 17.84%\n* Air Charter (thuê máy bay riêng): xác suất 0.01%\n* Ocean (vận chuyển đường biển): xác suất 0.01%",
|
| 714 |
+
"timestamp": "2025-08-14T14:27:22.525437",
|
| 715 |
+
"function_calls": [
|
| 716 |
+
{
|
| 717 |
+
"function_name": "predict_transportation_mode",
|
| 718 |
+
"arguments": {
|
| 719 |
+
"project_code": "ABC",
|
| 720 |
+
"pack_price": 500.0,
|
| 721 |
+
"vendor": "Aventador",
|
| 722 |
+
"country": "Mỹ"
|
| 723 |
+
},
|
| 724 |
+
"result": {
|
| 725 |
+
"predicted_shipment_mode": "Air",
|
| 726 |
+
"confidence_score": 0.8213831782341003,
|
| 727 |
+
"alternative_modes": [
|
| 728 |
+
{
|
| 729 |
+
"mode": "Truck",
|
| 730 |
+
"probability": 0.17840932309627533
|
| 731 |
+
},
|
| 732 |
+
{
|
| 733 |
+
"mode": "Air Charter",
|
| 734 |
+
"probability": 0.00013026525266468525
|
| 735 |
+
},
|
| 736 |
+
{
|
| 737 |
+
"mode": "Ocean",
|
| 738 |
+
"probability": 7.720861322013661e-05
|
| 739 |
+
}
|
| 740 |
+
],
|
| 741 |
+
"estimated_weight_kg": 10.0,
|
| 742 |
+
"estimated_freight_cost_usd": 50.0,
|
| 743 |
+
"encoded_features": {
|
| 744 |
+
"Project_Code": 0,
|
| 745 |
+
"Country": 0,
|
| 746 |
+
"Vendor": 0
|
| 747 |
+
},
|
| 748 |
+
"processing_notes": []
|
| 749 |
+
}
|
| 750 |
+
},
|
| 751 |
+
{
|
| 752 |
+
"function_name": "predict_transportation_mode",
|
| 753 |
+
"arguments": {
|
| 754 |
+
"project_code": "ABC",
|
| 755 |
+
"pack_price": 500.0,
|
| 756 |
+
"vendor": "Aventador",
|
| 757 |
+
"country": "Mỹ"
|
| 758 |
+
},
|
| 759 |
+
"result": {
|
| 760 |
+
"predicted_shipment_mode": "Air",
|
| 761 |
+
"confidence_score": 0.8213831782341003,
|
| 762 |
+
"alternative_modes": [
|
| 763 |
+
{
|
| 764 |
+
"mode": "Truck",
|
| 765 |
+
"probability": 0.17840932309627533
|
| 766 |
+
},
|
| 767 |
+
{
|
| 768 |
+
"mode": "Air Charter",
|
| 769 |
+
"probability": 0.00013026525266468525
|
| 770 |
+
},
|
| 771 |
+
{
|
| 772 |
+
"mode": "Ocean",
|
| 773 |
+
"probability": 7.720861322013661e-05
|
| 774 |
+
}
|
| 775 |
+
],
|
| 776 |
+
"estimated_weight_kg": 10.0,
|
| 777 |
+
"estimated_freight_cost_usd": 50.0,
|
| 778 |
+
"encoded_features": {
|
| 779 |
+
"Project_Code": 0,
|
| 780 |
+
"Country": 0,
|
| 781 |
+
"Vendor": 0
|
| 782 |
+
},
|
| 783 |
+
"processing_notes": []
|
| 784 |
+
}
|
| 785 |
+
}
|
| 786 |
+
],
|
| 787 |
+
"has_functions": true
|
| 788 |
+
},
|
| 789 |
+
{
|
| 790 |
+
"role": "user",
|
| 791 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Để dự đoán chính xác, cho tôi biết: quốc gia đến, ...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Phương thức vận chuyển được dự đoán là **Air (vận ...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Phương thức vận chuyển được dự đoán là **Air (vận ...\n\nCurrent conversation:\nHello",
|
| 792 |
+
"timestamp": "2025-08-14T22:40:53.854989"
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"role": "assistant",
|
| 796 |
+
"content": "Chào bạn. Tôi có thể giúp gì cho bạn hôm nay?",
|
| 797 |
+
"timestamp": "2025-08-14T22:40:53.854989",
|
| 798 |
+
"function_calls": [],
|
| 799 |
+
"has_functions": false
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"role": "user",
|
| 803 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Phương thức vận chuyển được dự đoán là **Air (vận ...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Phương thức vận chuyển được dự đoán là **Air (vận ...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Chào bạn. Tôi có thể giúp gì cho bạn hôm nay?\n\nCurrent conversation:\nHello",
|
| 804 |
+
"timestamp": "2025-08-14T22:40:55.409005"
|
| 805 |
+
},
|
| 806 |
+
{
|
| 807 |
+
"role": "assistant",
|
| 808 |
+
"content": "Chào bạn. Tôi có thể giúp gì cho bạn hôm nay?",
|
| 809 |
+
"timestamp": "2025-08-14T22:40:55.409005",
|
| 810 |
+
"function_calls": [],
|
| 811 |
+
"has_functions": false
|
| 812 |
+
},
|
| 813 |
+
{
|
| 814 |
+
"role": "user",
|
| 815 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Phương thức vận chuyển được dự đoán là **Air (vận ...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Chào bạn. Tôi có thể giúp gì cho bạn hôm nay?\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Chào bạn. Tôi có thể giúp gì cho bạn hôm nay?\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project ABC",
|
| 816 |
+
"timestamp": "2025-08-14T23:36:03.688177"
|
| 817 |
+
},
|
| 818 |
+
{
|
| 819 |
+
"role": "assistant",
|
| 820 |
+
"content": "Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia, giá gói hàng, và nhà cung cấp.",
|
| 821 |
+
"timestamp": "2025-08-14T23:36:03.688177",
|
| 822 |
+
"function_calls": [],
|
| 823 |
+
"has_functions": false
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"role": "user",
|
| 827 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Chào bạn. Tôi có thể giúp gì cho bạn hôm nay?\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Chào bạn. Tôi có thể giúp gì cho bạn hôm nay?\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project ABC",
|
| 828 |
+
"timestamp": "2025-08-14T23:36:05.474730"
|
| 829 |
+
},
|
| 830 |
+
{
|
| 831 |
+
"role": "assistant",
|
| 832 |
+
"content": "Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia, giá gói hàng, và nhà cung cấp.",
|
| 833 |
+
"timestamp": "2025-08-14T23:36:05.474730",
|
| 834 |
+
"function_calls": [],
|
| 835 |
+
"has_functions": false
|
| 836 |
+
},
|
| 837 |
+
{
|
| 838 |
+
"role": "user",
|
| 839 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Chào bạn. Tôi có thể giúp gì cho bạn hôm nay?\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển",
|
| 840 |
+
"timestamp": "2025-08-14T23:48:25.704841"
|
| 841 |
+
},
|
| 842 |
+
{
|
| 843 |
+
"role": "assistant",
|
| 844 |
+
"content": "Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia, giá gói hàng, mã dự án và nhà cung cấp.",
|
| 845 |
+
"timestamp": "2025-08-14T23:48:25.704841",
|
| 846 |
+
"function_calls": [],
|
| 847 |
+
"has_functions": false
|
| 848 |
+
},
|
| 849 |
+
{
|
| 850 |
+
"role": "user",
|
| 851 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển",
|
| 852 |
+
"timestamp": "2025-08-14T23:48:27.485669"
|
| 853 |
+
},
|
| 854 |
+
{
|
| 855 |
+
"role": "assistant",
|
| 856 |
+
"content": "Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia, giá gói hàng, mã dự án và nhà cung cấp.",
|
| 857 |
+
"timestamp": "2025-08-14T23:48:27.485669",
|
| 858 |
+
"function_calls": [],
|
| 859 |
+
"has_functions": false
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"role": "user",
|
| 863 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project 100-CI-T01 đến Vietnam",
|
| 864 |
+
"timestamp": "2025-08-15T00:01:29.560265"
|
| 865 |
+
},
|
| 866 |
+
{
|
| 867 |
+
"role": "assistant",
|
| 868 |
+
"content": "Bạn vui lòng cung cấp đầy đủ thông tin về vendor và pack_price.",
|
| 869 |
+
"timestamp": "2025-08-15T00:01:29.560265",
|
| 870 |
+
"function_calls": [],
|
| 871 |
+
"has_functions": false
|
| 872 |
+
},
|
| 873 |
+
{
|
| 874 |
+
"role": "user",
|
| 875 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về vendor v...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project 100-CI-T01 đến Vietnam",
|
| 876 |
+
"timestamp": "2025-08-15T00:01:30.968817"
|
| 877 |
+
},
|
| 878 |
+
{
|
| 879 |
+
"role": "assistant",
|
| 880 |
+
"content": "Bạn vui lòng cung cấp đầy đủ thông tin về pack\\_price và vendor vận chuyển.",
|
| 881 |
+
"timestamp": "2025-08-15T00:01:30.968817",
|
| 882 |
+
"function_calls": [],
|
| 883 |
+
"has_functions": false
|
| 884 |
+
},
|
| 885 |
+
{
|
| 886 |
+
"role": "user",
|
| 887 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về quốc gia...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về vendor v...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về pack\\_pr...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project 100-CI-T01 với 50kg hàng đến Vietnam",
|
| 888 |
+
"timestamp": "2025-08-15T00:21:00.097383"
|
| 889 |
+
},
|
| 890 |
+
{
|
| 891 |
+
"role": "assistant",
|
| 892 |
+
"content": "Bạn vui lòng cung cấp đầy đủ thông tin về vendor và pack\\_price.",
|
| 893 |
+
"timestamp": "2025-08-15T00:21:00.097383",
|
| 894 |
+
"function_calls": [],
|
| 895 |
+
"has_functions": false
|
| 896 |
+
},
|
| 897 |
+
{
|
| 898 |
+
"role": "user",
|
| 899 |
+
"content": "Previous conversations for context:\n1. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về vendor v...\n2. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về pack\\_pr...\n3. User: Previous conversations for context:\n1. User: Previ... → AI: Bạn vui lòng cung cấp đầy đủ thông tin về vendor v...\n\nCurrent conversation:\nDự đoán phương thức vận chuyển cho project 100-CI-T01 với 50kg hàng đến Vietnam",
|
| 900 |
+
"timestamp": "2025-08-15T00:21:01.168117"
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"role": "assistant",
|
| 904 |
+
"content": "Bạn vui lòng cung cấp đầy đủ thông tin về vendor và pack\\_price.",
|
| 905 |
+
"timestamp": "2025-08-15T00:21:01.168117",
|
| 906 |
+
"function_calls": [],
|
| 907 |
+
"has_functions": false
|
| 908 |
+
}
|
| 909 |
+
]
|
source/docker-compose.yml
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version: '3.8'
|
| 2 |
+
services:
|
| 3 |
+
|
| 4 |
+
# Backend FastAPI Service
|
| 5 |
+
backend:
|
| 6 |
+
build:
|
| 7 |
+
context: .
|
| 8 |
+
dockerfile: Dockerfile
|
| 9 |
+
ports:
|
| 10 |
+
- "3454:3454"
|
| 11 |
+
volumes:
|
| 12 |
+
- .:/app
|
| 13 |
+
environment:
|
| 14 |
+
- PYTHONUNBUFFERED=1
|
| 15 |
+
- API_BASE_URL=http://localhost:3454/api
|
| 16 |
+
restart: unless-stopped
|
| 17 |
+
command: ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "3454"]
|
| 18 |
+
healthcheck:
|
| 19 |
+
test: ["CMD", "curl", "-f", "http://localhost:3454/"]
|
| 20 |
+
interval: 30s
|
| 21 |
+
timeout: 10s
|
| 22 |
+
retries: 3
|
| 23 |
+
|
| 24 |
+
# Frontend Gradio Service
|
| 25 |
+
frontend:
|
| 26 |
+
build:
|
| 27 |
+
context: .
|
| 28 |
+
dockerfile: Dockerfile
|
| 29 |
+
ports:
|
| 30 |
+
- "7860:7860"
|
| 31 |
+
volumes:
|
| 32 |
+
- .:/app
|
| 33 |
+
environment:
|
| 34 |
+
- PYTHONUNBUFFERED=1
|
| 35 |
+
- API_BASE_URL=http://backend:3454/api
|
| 36 |
+
restart: unless-stopped
|
| 37 |
+
command: ["python", "app.py"]
|
| 38 |
+
depends_on:
|
| 39 |
+
- backend
|
| 40 |
+
|
| 41 |
+
# All-in-one service (alternative)
|
| 42 |
+
app:
|
| 43 |
+
build:
|
| 44 |
+
context: .
|
| 45 |
+
dockerfile: Dockerfile
|
| 46 |
+
ports:
|
| 47 |
+
- "3454:3454"
|
| 48 |
+
- "7860:7860"
|
| 49 |
+
volumes:
|
| 50 |
+
- .:/app
|
| 51 |
+
environment:
|
| 52 |
+
- PYTHONUNBUFFERED=1
|
| 53 |
+
- API_BASE_URL=http://localhost:3454/api
|
| 54 |
+
restart: unless-stopped
|
| 55 |
+
command: ["bash", "start.sh"]
|
| 56 |
+
profiles:
|
| 57 |
+
- "all-in-one"
|
source/logs/daily_20250814.log
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
2025-08-15 00:01:27 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 2 |
+
2025-08-15 00:01:28 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project 100-CI-T01 đến Vietnam...
|
| 3 |
+
2025-08-15 00:01:28 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 4 |
+
2025-08-15 00:01:28 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
source/logs/daily_20250814.log.2025-08-14
ADDED
|
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
2025-08-14 14:40:41 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 2 |
+
2025-08-14 14:40:41 - test - INFO - <string>:1 - <module>() - Testing new logging system
|
| 3 |
+
2025-08-14 14:40:41 - test - WARNING - <string>:1 - <module>() - This is a warning
|
| 4 |
+
2025-08-14 14:40:41 - test - ERROR - <string>:1 - <module>() - This is an error test
|
| 5 |
+
2025-08-14 14:41:04 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 6 |
+
2025-08-14 14:41:13 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 7 |
+
2025-08-14 14:41:14 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 8 |
+
2025-08-14 14:41:14 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 9 |
+
2025-08-14 14:58:28 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 10 |
+
2025-08-14 15:01:58 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 11 |
+
2025-08-14 15:01:58 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 12 |
+
2025-08-14 15:01:59 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 13 |
+
2025-08-14 15:01:59 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 14 |
+
2025-08-14 15:02:06 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 15 |
+
2025-08-14 17:18:20 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 16 |
+
2025-08-14 17:18:21 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 17 |
+
2025-08-14 17:18:21 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 18 |
+
2025-08-14 17:18:21 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 19 |
+
2025-08-14 17:19:00 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 20 |
+
2025-08-14 17:19:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 21 |
+
2025-08-14 17:19:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 22 |
+
2025-08-14 17:19:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 23 |
+
2025-08-14 17:24:38 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 24 |
+
2025-08-14 17:24:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 25 |
+
2025-08-14 17:24:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 26 |
+
2025-08-14 17:24:39 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 27 |
+
2025-08-14 17:30:57 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 28 |
+
2025-08-14 17:30:57 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 29 |
+
2025-08-14 17:30:57 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 30 |
+
2025-08-14 17:30:58 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 31 |
+
2025-08-14 17:31:05 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 32 |
+
2025-08-14 17:34:43 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 33 |
+
2025-08-14 17:34:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 34 |
+
2025-08-14 17:34:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 35 |
+
2025-08-14 17:34:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 36 |
+
2025-08-14 17:35:10 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 37 |
+
2025-08-14 17:35:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 38 |
+
2025-08-14 17:35:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 39 |
+
2025-08-14 17:35:11 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 40 |
+
2025-08-14 21:20:56 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 41 |
+
2025-08-14 21:21:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK"
|
| 42 |
+
2025-08-14 21:21:02 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 43 |
+
2025-08-14 21:33:14 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 44 |
+
2025-08-14 21:33:18 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK"
|
| 45 |
+
2025-08-14 21:33:19 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 46 |
+
2025-08-14 21:50:16 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 47 |
+
2025-08-14 21:50:18 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 1 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 48 |
+
2025-08-14 21:50:19 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 2 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 49 |
+
2025-08-14 21:50:21 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 3 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 50 |
+
2025-08-14 21:50:21 - google.auth._default - WARNING - _default.py:362 - _get_gce_credentials() - Authentication failed using Compute Engine authentication due to unavailable metadata server.
|
| 51 |
+
2025-08-14 21:52:01 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 52 |
+
2025-08-14 21:52:03 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 1 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 53 |
+
2025-08-14 21:52:04 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 2 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 54 |
+
2025-08-14 21:52:06 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 3 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 55 |
+
2025-08-14 21:52:06 - google.auth._default - WARNING - _default.py:362 - _get_gce_credentials() - Authentication failed using Compute Engine authentication due to unavailable metadata server.
|
| 56 |
+
2025-08-14 21:52:06 - __main__ - WARNING - main.py:16 - <module>() - Chat functionality disabled due to: Your default credentials were not found. To set up Application Default Credentials, see https://cloud.google.com/docs/authentication/external/set-up-adc for more information.
|
| 57 |
+
2025-08-14 21:52:06 - __main__ - INFO - main.py:24 - <module>() - Starting Transportation Prediction API v2.0
|
| 58 |
+
2025-08-14 21:52:06 - __main__ - INFO - main.py:58 - <module>() - Starting server on localhost:3454
|
| 59 |
+
2025-08-14 22:04:45 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 60 |
+
2025-08-14 22:04:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 61 |
+
2025-08-14 22:04:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 62 |
+
2025-08-14 22:04:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 63 |
+
2025-08-14 22:04:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 64 |
+
2025-08-14 22:04:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 65 |
+
2025-08-14 22:04:47 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 66 |
+
2025-08-14 22:04:47 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 67 |
+
2025-08-14 22:06:44 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 68 |
+
2025-08-14 22:06:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 69 |
+
2025-08-14 22:06:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 70 |
+
2025-08-14 22:06:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 71 |
+
2025-08-14 22:06:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 72 |
+
2025-08-14 22:06:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 73 |
+
2025-08-14 22:06:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 74 |
+
2025-08-14 22:06:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 75 |
+
2025-08-14 22:06:48 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 76 |
+
2025-08-14 22:06:49 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://cdn-media.huggingface.co/frpc-gradio-0.3/frpc_windows_amd64.exe "HTTP/1.1 200 OK"
|
| 77 |
+
2025-08-14 22:09:43 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 78 |
+
2025-08-14 22:09:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 79 |
+
2025-08-14 22:09:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 80 |
+
2025-08-14 22:09:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 81 |
+
2025-08-14 22:09:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 82 |
+
2025-08-14 22:10:00 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 83 |
+
2025-08-14 22:16:58 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 84 |
+
2025-08-14 22:17:00 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 85 |
+
2025-08-14 22:17:00 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 86 |
+
2025-08-14 22:17:14 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 87 |
+
2025-08-14 22:17:30 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 88 |
+
2025-08-14 22:17:30 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project ABC...
|
| 89 |
+
2025-08-14 22:17:30 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 90 |
+
2025-08-14 22:17:30 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 91 |
+
2025-08-14 22:17:31 - langchain_google_genai.chat_models - WARNING - before_sleep.py:65 - log_it() - Retrying langchain_google_genai.chat_models._chat_with_retry.<locals>._chat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 You exceeded your current quota, please check your plan and billing details. For more information on this error, head to: https://ai.google.dev/gemini-api/docs/rate-limits. [violations {
|
| 92 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_input_token_count"
|
| 93 |
+
quota_id: "GenerateContentInputTokensPerModelPerMinute-FreeTier"
|
| 94 |
+
quota_dimensions {
|
| 95 |
+
key: "model"
|
| 96 |
+
value: "gemini-1.5-pro"
|
| 97 |
+
}
|
| 98 |
+
quota_dimensions {
|
| 99 |
+
key: "location"
|
| 100 |
+
value: "global"
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
violations {
|
| 104 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_requests"
|
| 105 |
+
quota_id: "GenerateRequestsPerMinutePerProjectPerModel-FreeTier"
|
| 106 |
+
quota_dimensions {
|
| 107 |
+
key: "model"
|
| 108 |
+
value: "gemini-1.5-pro"
|
| 109 |
+
}
|
| 110 |
+
quota_dimensions {
|
| 111 |
+
key: "location"
|
| 112 |
+
value: "global"
|
| 113 |
+
}
|
| 114 |
+
}
|
| 115 |
+
violations {
|
| 116 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_requests"
|
| 117 |
+
quota_id: "GenerateRequestsPerDayPerProjectPerModel-FreeTier"
|
| 118 |
+
quota_dimensions {
|
| 119 |
+
key: "model"
|
| 120 |
+
value: "gemini-1.5-pro"
|
| 121 |
+
}
|
| 122 |
+
quota_dimensions {
|
| 123 |
+
key: "location"
|
| 124 |
+
value: "global"
|
| 125 |
+
}
|
| 126 |
+
}
|
| 127 |
+
, links {
|
| 128 |
+
description: "Learn more about Gemini API quotas"
|
| 129 |
+
url: "https://ai.google.dev/gemini-api/docs/rate-limits"
|
| 130 |
+
}
|
| 131 |
+
, retry_delay {
|
| 132 |
+
seconds: 27
|
| 133 |
+
}
|
| 134 |
+
].
|
| 135 |
+
2025-08-14 22:17:33 - src.domain.node.save_cache - WARNING - save_cache.py:87 - process_save() - No complete conversation to save
|
| 136 |
+
2025-08-14 22:17:33 - src.app.api.chat - INFO - chat.py:66 - generate_stream() - Graph streaming completed. Accumulated: 0 chars
|
| 137 |
+
2025-08-14 22:17:33 - src.app.api.chat - INFO - chat.py:70 - generate_stream() - No streaming content, falling back to invoke
|
| 138 |
+
2025-08-14 22:17:33 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project ABC...
|
| 139 |
+
2025-08-14 22:17:33 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 140 |
+
2025-08-14 22:17:33 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 141 |
+
2025-08-14 22:17:34 - langchain_google_genai.chat_models - WARNING - before_sleep.py:65 - log_it() - Retrying langchain_google_genai.chat_models._chat_with_retry.<locals>._chat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 You exceeded your current quota, please check your plan and billing details. For more information on this error, head to: https://ai.google.dev/gemini-api/docs/rate-limits. [violations {
|
| 142 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_requests"
|
| 143 |
+
quota_id: "GenerateRequestsPerDayPerProjectPerModel-FreeTier"
|
| 144 |
+
quota_dimensions {
|
| 145 |
+
key: "model"
|
| 146 |
+
value: "gemini-1.5-pro"
|
| 147 |
+
}
|
| 148 |
+
quota_dimensions {
|
| 149 |
+
key: "location"
|
| 150 |
+
value: "global"
|
| 151 |
+
}
|
| 152 |
+
}
|
| 153 |
+
violations {
|
| 154 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_requests"
|
| 155 |
+
quota_id: "GenerateRequestsPerMinutePerProjectPerModel-FreeTier"
|
| 156 |
+
quota_dimensions {
|
| 157 |
+
key: "model"
|
| 158 |
+
value: "gemini-1.5-pro"
|
| 159 |
+
}
|
| 160 |
+
quota_dimensions {
|
| 161 |
+
key: "location"
|
| 162 |
+
value: "global"
|
| 163 |
+
}
|
| 164 |
+
}
|
| 165 |
+
violations {
|
| 166 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_input_token_count"
|
| 167 |
+
quota_id: "GenerateContentInputTokensPerModelPerMinute-FreeTier"
|
| 168 |
+
quota_dimensions {
|
| 169 |
+
key: "model"
|
| 170 |
+
value: "gemini-1.5-pro"
|
| 171 |
+
}
|
| 172 |
+
quota_dimensions {
|
| 173 |
+
key: "location"
|
| 174 |
+
value: "global"
|
| 175 |
+
}
|
| 176 |
+
}
|
| 177 |
+
, links {
|
| 178 |
+
description: "Learn more about Gemini API quotas"
|
| 179 |
+
url: "https://ai.google.dev/gemini-api/docs/rate-limits"
|
| 180 |
+
}
|
| 181 |
+
, retry_delay {
|
| 182 |
+
seconds: 24
|
| 183 |
+
}
|
| 184 |
+
].
|
| 185 |
+
2025-08-14 22:17:36 - src.domain.node.save_cache - WARNING - save_cache.py:87 - process_save() - No complete conversation to save
|
| 186 |
+
2025-08-14 22:28:56 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 187 |
+
2025-08-14 22:28:57 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 188 |
+
2025-08-14 22:32:04 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 189 |
+
2025-08-14 22:32:05 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 190 |
+
2025-08-14 22:32:41 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 191 |
+
2025-08-14 22:33:02 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 192 |
+
2025-08-14 22:33:10 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 193 |
+
2025-08-14 22:33:12 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 194 |
+
2025-08-14 22:34:12 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 195 |
+
2025-08-14 22:36:37 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 196 |
+
2025-08-14 22:36:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 197 |
+
2025-08-14 22:36:56 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 198 |
+
2025-08-14 22:36:58 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 199 |
+
2025-08-14 22:39:26 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 200 |
+
2025-08-14 22:39:26 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7861/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 201 |
+
2025-08-14 22:39:26 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 202 |
+
2025-08-14 22:39:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 203 |
+
2025-08-14 22:39:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 204 |
+
2025-08-14 22:39:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 205 |
+
2025-08-14 22:39:28 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 206 |
+
2025-08-14 22:39:28 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 207 |
+
2025-08-14 22:39:30 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 208 |
+
2025-08-14 22:40:52 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 209 |
+
2025-08-14 22:40:52 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Hello...
|
| 210 |
+
2025-08-14 22:40:52 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 211 |
+
2025-08-14 22:40:52 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 212 |
+
2025-08-14 22:40:53 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 42 messages to cache
|
| 213 |
+
2025-08-14 22:40:53 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 214 |
+
2025-08-14 22:40:53 - src.app.api.chat - INFO - chat.py:66 - generate_stream() - Graph streaming completed. Accumulated: 45 chars
|
| 215 |
+
2025-08-14 22:40:53 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Hello...
|
| 216 |
+
2025-08-14 22:40:53 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 217 |
+
2025-08-14 22:40:53 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 218 |
+
2025-08-14 22:40:55 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 44 messages to cache
|
| 219 |
+
2025-08-14 22:40:55 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 220 |
+
2025-08-14 22:41:07 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 221 |
+
2025-08-14 22:41:39 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 222 |
+
2025-08-14 22:41:41 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 223 |
+
2025-08-14 22:41:51 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 224 |
+
2025-08-14 22:41:52 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 225 |
+
2025-08-14 22:41:59 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 226 |
+
2025-08-14 22:42:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 227 |
+
2025-08-14 22:42:25 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 228 |
+
2025-08-14 22:42:27 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 229 |
+
2025-08-14 22:43:13 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 230 |
+
2025-08-14 22:43:14 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 231 |
+
2025-08-14 22:43:14 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 232 |
+
2025-08-14 22:43:14 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 233 |
+
2025-08-14 22:43:14 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 234 |
+
2025-08-14 22:43:15 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 235 |
+
2025-08-14 22:43:15 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 236 |
+
2025-08-14 22:43:16 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 237 |
+
2025-08-14 22:44:40 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 238 |
+
2025-08-14 22:44:42 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 239 |
+
2025-08-14 22:44:59 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 240 |
+
2025-08-14 22:45:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 241 |
+
2025-08-14 22:45:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 242 |
+
2025-08-14 22:45:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 243 |
+
2025-08-14 22:45:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 244 |
+
2025-08-14 22:45:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 245 |
+
2025-08-14 22:45:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 246 |
+
2025-08-14 22:45:02 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 247 |
+
2025-08-14 22:45:04 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 248 |
+
2025-08-14 22:46:02 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 249 |
+
2025-08-14 22:46:03 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 250 |
+
2025-08-14 22:50:42 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 251 |
+
2025-08-14 22:51:51 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 252 |
+
2025-08-14 22:51:52 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 253 |
+
2025-08-14 22:51:52 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 254 |
+
2025-08-14 22:51:52 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 255 |
+
2025-08-14 22:51:52 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 256 |
+
2025-08-14 22:51:53 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 257 |
+
2025-08-14 22:51:53 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 258 |
+
2025-08-14 22:51:54 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 259 |
+
2025-08-14 22:51:55 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 260 |
+
2025-08-14 22:56:37 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 261 |
+
2025-08-14 22:56:37 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 262 |
+
2025-08-14 22:56:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 263 |
+
2025-08-14 22:56:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 264 |
+
2025-08-14 22:56:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 265 |
+
2025-08-14 22:56:39 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 266 |
+
2025-08-14 22:56:39 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 267 |
+
2025-08-14 22:56:40 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 268 |
+
2025-08-14 22:56:41 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 269 |
+
2025-08-14 23:00:02 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 270 |
+
2025-08-14 23:00:03 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 271 |
+
2025-08-14 23:00:13 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 272 |
+
2025-08-14 23:00:15 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 273 |
+
2025-08-14 23:01:27 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 274 |
+
2025-08-14 23:01:28 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 275 |
+
2025-08-14 23:10:00 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 276 |
+
2025-08-14 23:10:01 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 277 |
+
2025-08-14 23:10:01 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 278 |
+
2025-08-14 23:10:51 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 279 |
+
2025-08-14 23:24:26 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 280 |
+
2025-08-14 23:24:26 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
| 281 |
+
2025-08-14 23:24:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
| 282 |
+
2025-08-14 23:24:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 283 |
+
2025-08-14 23:24:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 284 |
+
2025-08-14 23:24:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 285 |
+
2025-08-14 23:24:28 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 286 |
+
2025-08-14 23:24:28 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 287 |
+
2025-08-14 23:24:29 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 288 |
+
2025-08-14 23:28:09 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 289 |
+
2025-08-14 23:28:09 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
| 290 |
+
2025-08-14 23:28:09 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
| 291 |
+
2025-08-14 23:28:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 292 |
+
2025-08-14 23:28:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 293 |
+
2025-08-14 23:28:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 294 |
+
2025-08-14 23:28:11 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 295 |
+
2025-08-14 23:28:11 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 296 |
+
2025-08-14 23:28:12 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 297 |
+
2025-08-14 23:35:49 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 298 |
+
2025-08-14 23:35:50 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 299 |
+
2025-08-14 23:35:50 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 300 |
+
2025-08-14 23:36:01 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 301 |
+
2025-08-14 23:36:01 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project ABC...
|
| 302 |
+
2025-08-14 23:36:01 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 303 |
+
2025-08-14 23:36:01 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 304 |
+
2025-08-14 23:36:03 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 46 messages to cache
|
| 305 |
+
2025-08-14 23:36:03 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 306 |
+
2025-08-14 23:36:03 - src.app.api.chat - INFO - chat.py:66 - generate_stream() - Graph streaming completed. Accumulated: 82 chars
|
| 307 |
+
2025-08-14 23:36:03 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project ABC...
|
| 308 |
+
2025-08-14 23:36:03 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 309 |
+
2025-08-14 23:36:03 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 310 |
+
2025-08-14 23:36:05 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 48 messages to cache
|
| 311 |
+
2025-08-14 23:36:05 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 312 |
+
2025-08-14 23:36:25 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 313 |
+
2025-08-14 23:44:39 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 314 |
+
2025-08-14 23:44:40 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 315 |
+
2025-08-14 23:44:40 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 316 |
+
2025-08-14 23:46:00 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 317 |
+
2025-08-14 23:46:01 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 318 |
+
2025-08-14 23:46:01 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 319 |
+
2025-08-14 23:46:38 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 320 |
+
2025-08-14 23:46:39 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 321 |
+
2025-08-14 23:46:39 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 322 |
+
2025-08-14 23:48:23 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 323 |
+
2025-08-14 23:48:23 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển...
|
| 324 |
+
2025-08-14 23:48:23 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 325 |
+
2025-08-14 23:48:23 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 326 |
+
2025-08-14 23:48:25 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 50 messages to cache
|
| 327 |
+
2025-08-14 23:48:25 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 328 |
+
2025-08-14 23:48:25 - src.app.api.chat - INFO - chat.py:65 - generate_stream() - Graph streaming completed. Accumulated: 91 chars
|
| 329 |
+
2025-08-14 23:48:25 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển...
|
| 330 |
+
2025-08-14 23:48:25 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 331 |
+
2025-08-14 23:48:25 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 332 |
+
2025-08-14 23:48:27 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 52 messages to cache
|
| 333 |
+
2025-08-14 23:48:27 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 334 |
+
2025-08-14 23:54:19 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
source/logs/errors.log
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
2025-08-14 14:40:41 - test - ERROR - <string>:1 - <module>() - This is an error test
|
source/logs/transportation.log
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
2025-08-14 14:40:41 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 2 |
+
2025-08-14 14:40:41 - test - INFO - <string>:1 - <module>() - Testing new logging system
|
| 3 |
+
2025-08-14 14:40:41 - test - WARNING - <string>:1 - <module>() - This is a warning
|
| 4 |
+
2025-08-14 14:40:41 - test - ERROR - <string>:1 - <module>() - This is an error test
|
| 5 |
+
2025-08-14 14:41:04 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 6 |
+
2025-08-14 14:41:13 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 7 |
+
2025-08-14 14:41:14 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 8 |
+
2025-08-14 14:41:14 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 9 |
+
2025-08-14 14:58:28 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 10 |
+
2025-08-14 15:01:58 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 11 |
+
2025-08-14 15:01:58 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 12 |
+
2025-08-14 15:01:59 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 13 |
+
2025-08-14 15:01:59 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 14 |
+
2025-08-14 15:02:06 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 15 |
+
2025-08-14 17:18:20 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 16 |
+
2025-08-14 17:18:21 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 17 |
+
2025-08-14 17:18:21 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 18 |
+
2025-08-14 17:18:21 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 19 |
+
2025-08-14 17:19:00 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 20 |
+
2025-08-14 17:19:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 21 |
+
2025-08-14 17:19:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 22 |
+
2025-08-14 17:19:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 23 |
+
2025-08-14 17:24:38 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 24 |
+
2025-08-14 17:24:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 25 |
+
2025-08-14 17:24:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 26 |
+
2025-08-14 17:24:39 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 27 |
+
2025-08-14 17:30:57 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 28 |
+
2025-08-14 17:30:57 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 29 |
+
2025-08-14 17:30:57 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 30 |
+
2025-08-14 17:30:58 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 31 |
+
2025-08-14 17:31:05 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 32 |
+
2025-08-14 17:34:43 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 33 |
+
2025-08-14 17:34:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 34 |
+
2025-08-14 17:34:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 35 |
+
2025-08-14 17:34:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 36 |
+
2025-08-14 17:35:10 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 37 |
+
2025-08-14 17:35:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 38 |
+
2025-08-14 17:35:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 39 |
+
2025-08-14 17:35:11 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 40 |
+
2025-08-14 21:20:56 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 41 |
+
2025-08-14 21:21:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK"
|
| 42 |
+
2025-08-14 21:21:02 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 43 |
+
2025-08-14 21:33:14 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 44 |
+
2025-08-14 21:33:18 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK"
|
| 45 |
+
2025-08-14 21:33:19 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 46 |
+
2025-08-14 21:50:16 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 47 |
+
2025-08-14 21:50:18 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 1 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 48 |
+
2025-08-14 21:50:19 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 2 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 49 |
+
2025-08-14 21:50:21 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 3 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 50 |
+
2025-08-14 21:50:21 - google.auth._default - WARNING - _default.py:362 - _get_gce_credentials() - Authentication failed using Compute Engine authentication due to unavailable metadata server.
|
| 51 |
+
2025-08-14 21:52:01 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 52 |
+
2025-08-14 21:52:03 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 1 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 53 |
+
2025-08-14 21:52:04 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 2 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 54 |
+
2025-08-14 21:52:06 - google.auth.compute_engine._metadata - WARNING - _metadata.py:142 - ping() - Compute Engine Metadata server unavailable on attempt 3 of 3. Reason: [WinError 10051] A socket operation was attempted to an unreachable network
|
| 55 |
+
2025-08-14 21:52:06 - google.auth._default - WARNING - _default.py:362 - _get_gce_credentials() - Authentication failed using Compute Engine authentication due to unavailable metadata server.
|
| 56 |
+
2025-08-14 21:52:06 - __main__ - WARNING - main.py:16 - <module>() - Chat functionality disabled due to: Your default credentials were not found. To set up Application Default Credentials, see https://cloud.google.com/docs/authentication/external/set-up-adc for more information.
|
| 57 |
+
2025-08-14 21:52:06 - __main__ - INFO - main.py:24 - <module>() - Starting Transportation Prediction API v2.0
|
| 58 |
+
2025-08-14 21:52:06 - __main__ - INFO - main.py:58 - <module>() - Starting server on localhost:3454
|
| 59 |
+
2025-08-14 22:04:45 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 60 |
+
2025-08-14 22:04:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 61 |
+
2025-08-14 22:04:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 62 |
+
2025-08-14 22:04:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 63 |
+
2025-08-14 22:04:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 64 |
+
2025-08-14 22:04:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 65 |
+
2025-08-14 22:04:47 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 66 |
+
2025-08-14 22:04:47 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 67 |
+
2025-08-14 22:06:44 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 68 |
+
2025-08-14 22:06:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 69 |
+
2025-08-14 22:06:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 70 |
+
2025-08-14 22:06:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 71 |
+
2025-08-14 22:06:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 72 |
+
2025-08-14 22:06:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 73 |
+
2025-08-14 22:06:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 74 |
+
2025-08-14 22:06:46 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 75 |
+
2025-08-14 22:06:48 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 76 |
+
2025-08-14 22:06:49 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://cdn-media.huggingface.co/frpc-gradio-0.3/frpc_windows_amd64.exe "HTTP/1.1 200 OK"
|
| 77 |
+
2025-08-14 22:09:43 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 78 |
+
2025-08-14 22:09:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 79 |
+
2025-08-14 22:09:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 200 OK"
|
| 80 |
+
2025-08-14 22:09:44 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 81 |
+
2025-08-14 22:09:45 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 82 |
+
2025-08-14 22:10:00 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 83 |
+
2025-08-14 22:16:58 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 84 |
+
2025-08-14 22:17:00 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 85 |
+
2025-08-14 22:17:00 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 86 |
+
2025-08-14 22:17:14 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 87 |
+
2025-08-14 22:17:30 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 88 |
+
2025-08-14 22:17:30 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project ABC...
|
| 89 |
+
2025-08-14 22:17:30 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 90 |
+
2025-08-14 22:17:30 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 91 |
+
2025-08-14 22:17:31 - langchain_google_genai.chat_models - WARNING - before_sleep.py:65 - log_it() - Retrying langchain_google_genai.chat_models._chat_with_retry.<locals>._chat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 You exceeded your current quota, please check your plan and billing details. For more information on this error, head to: https://ai.google.dev/gemini-api/docs/rate-limits. [violations {
|
| 92 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_input_token_count"
|
| 93 |
+
quota_id: "GenerateContentInputTokensPerModelPerMinute-FreeTier"
|
| 94 |
+
quota_dimensions {
|
| 95 |
+
key: "model"
|
| 96 |
+
value: "gemini-1.5-pro"
|
| 97 |
+
}
|
| 98 |
+
quota_dimensions {
|
| 99 |
+
key: "location"
|
| 100 |
+
value: "global"
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
violations {
|
| 104 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_requests"
|
| 105 |
+
quota_id: "GenerateRequestsPerMinutePerProjectPerModel-FreeTier"
|
| 106 |
+
quota_dimensions {
|
| 107 |
+
key: "model"
|
| 108 |
+
value: "gemini-1.5-pro"
|
| 109 |
+
}
|
| 110 |
+
quota_dimensions {
|
| 111 |
+
key: "location"
|
| 112 |
+
value: "global"
|
| 113 |
+
}
|
| 114 |
+
}
|
| 115 |
+
violations {
|
| 116 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_requests"
|
| 117 |
+
quota_id: "GenerateRequestsPerDayPerProjectPerModel-FreeTier"
|
| 118 |
+
quota_dimensions {
|
| 119 |
+
key: "model"
|
| 120 |
+
value: "gemini-1.5-pro"
|
| 121 |
+
}
|
| 122 |
+
quota_dimensions {
|
| 123 |
+
key: "location"
|
| 124 |
+
value: "global"
|
| 125 |
+
}
|
| 126 |
+
}
|
| 127 |
+
, links {
|
| 128 |
+
description: "Learn more about Gemini API quotas"
|
| 129 |
+
url: "https://ai.google.dev/gemini-api/docs/rate-limits"
|
| 130 |
+
}
|
| 131 |
+
, retry_delay {
|
| 132 |
+
seconds: 27
|
| 133 |
+
}
|
| 134 |
+
].
|
| 135 |
+
2025-08-14 22:17:33 - src.domain.node.save_cache - WARNING - save_cache.py:87 - process_save() - No complete conversation to save
|
| 136 |
+
2025-08-14 22:17:33 - src.app.api.chat - INFO - chat.py:66 - generate_stream() - Graph streaming completed. Accumulated: 0 chars
|
| 137 |
+
2025-08-14 22:17:33 - src.app.api.chat - INFO - chat.py:70 - generate_stream() - No streaming content, falling back to invoke
|
| 138 |
+
2025-08-14 22:17:33 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project ABC...
|
| 139 |
+
2025-08-14 22:17:33 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 140 |
+
2025-08-14 22:17:33 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 141 |
+
2025-08-14 22:17:34 - langchain_google_genai.chat_models - WARNING - before_sleep.py:65 - log_it() - Retrying langchain_google_genai.chat_models._chat_with_retry.<locals>._chat_with_retry in 2.0 seconds as it raised ResourceExhausted: 429 You exceeded your current quota, please check your plan and billing details. For more information on this error, head to: https://ai.google.dev/gemini-api/docs/rate-limits. [violations {
|
| 142 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_requests"
|
| 143 |
+
quota_id: "GenerateRequestsPerDayPerProjectPerModel-FreeTier"
|
| 144 |
+
quota_dimensions {
|
| 145 |
+
key: "model"
|
| 146 |
+
value: "gemini-1.5-pro"
|
| 147 |
+
}
|
| 148 |
+
quota_dimensions {
|
| 149 |
+
key: "location"
|
| 150 |
+
value: "global"
|
| 151 |
+
}
|
| 152 |
+
}
|
| 153 |
+
violations {
|
| 154 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_requests"
|
| 155 |
+
quota_id: "GenerateRequestsPerMinutePerProjectPerModel-FreeTier"
|
| 156 |
+
quota_dimensions {
|
| 157 |
+
key: "model"
|
| 158 |
+
value: "gemini-1.5-pro"
|
| 159 |
+
}
|
| 160 |
+
quota_dimensions {
|
| 161 |
+
key: "location"
|
| 162 |
+
value: "global"
|
| 163 |
+
}
|
| 164 |
+
}
|
| 165 |
+
violations {
|
| 166 |
+
quota_metric: "generativelanguage.googleapis.com/generate_content_free_tier_input_token_count"
|
| 167 |
+
quota_id: "GenerateContentInputTokensPerModelPerMinute-FreeTier"
|
| 168 |
+
quota_dimensions {
|
| 169 |
+
key: "model"
|
| 170 |
+
value: "gemini-1.5-pro"
|
| 171 |
+
}
|
| 172 |
+
quota_dimensions {
|
| 173 |
+
key: "location"
|
| 174 |
+
value: "global"
|
| 175 |
+
}
|
| 176 |
+
}
|
| 177 |
+
, links {
|
| 178 |
+
description: "Learn more about Gemini API quotas"
|
| 179 |
+
url: "https://ai.google.dev/gemini-api/docs/rate-limits"
|
| 180 |
+
}
|
| 181 |
+
, retry_delay {
|
| 182 |
+
seconds: 24
|
| 183 |
+
}
|
| 184 |
+
].
|
| 185 |
+
2025-08-14 22:17:36 - src.domain.node.save_cache - WARNING - save_cache.py:87 - process_save() - No complete conversation to save
|
| 186 |
+
2025-08-14 22:28:56 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 187 |
+
2025-08-14 22:28:57 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 188 |
+
2025-08-14 22:32:04 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 189 |
+
2025-08-14 22:32:05 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 190 |
+
2025-08-14 22:32:41 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 191 |
+
2025-08-14 22:33:02 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 192 |
+
2025-08-14 22:33:10 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 193 |
+
2025-08-14 22:33:12 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 194 |
+
2025-08-14 22:34:12 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 195 |
+
2025-08-14 22:36:37 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 196 |
+
2025-08-14 22:36:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 197 |
+
2025-08-14 22:36:56 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 198 |
+
2025-08-14 22:36:58 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 199 |
+
2025-08-14 22:39:26 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 200 |
+
2025-08-14 22:39:26 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7861/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 201 |
+
2025-08-14 22:39:26 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 202 |
+
2025-08-14 22:39:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 203 |
+
2025-08-14 22:39:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 204 |
+
2025-08-14 22:39:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 205 |
+
2025-08-14 22:39:28 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 206 |
+
2025-08-14 22:39:28 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 500 Internal Server Error"
|
| 207 |
+
2025-08-14 22:39:30 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 208 |
+
2025-08-14 22:40:52 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 209 |
+
2025-08-14 22:40:52 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Hello...
|
| 210 |
+
2025-08-14 22:40:52 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 211 |
+
2025-08-14 22:40:52 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 212 |
+
2025-08-14 22:40:53 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 42 messages to cache
|
| 213 |
+
2025-08-14 22:40:53 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 214 |
+
2025-08-14 22:40:53 - src.app.api.chat - INFO - chat.py:66 - generate_stream() - Graph streaming completed. Accumulated: 45 chars
|
| 215 |
+
2025-08-14 22:40:53 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Hello...
|
| 216 |
+
2025-08-14 22:40:53 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 217 |
+
2025-08-14 22:40:53 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 218 |
+
2025-08-14 22:40:55 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 44 messages to cache
|
| 219 |
+
2025-08-14 22:40:55 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 220 |
+
2025-08-14 22:41:07 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 221 |
+
2025-08-14 22:41:39 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 222 |
+
2025-08-14 22:41:41 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 223 |
+
2025-08-14 22:41:51 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 224 |
+
2025-08-14 22:41:52 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 225 |
+
2025-08-14 22:41:59 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 226 |
+
2025-08-14 22:42:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 227 |
+
2025-08-14 22:42:25 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 228 |
+
2025-08-14 22:42:27 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 229 |
+
2025-08-14 22:43:13 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 230 |
+
2025-08-14 22:43:14 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 231 |
+
2025-08-14 22:43:14 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 232 |
+
2025-08-14 22:43:14 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 233 |
+
2025-08-14 22:43:14 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 234 |
+
2025-08-14 22:43:15 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 235 |
+
2025-08-14 22:43:15 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 236 |
+
2025-08-14 22:43:16 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 237 |
+
2025-08-14 22:44:40 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 238 |
+
2025-08-14 22:44:42 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 239 |
+
2025-08-14 22:44:59 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 240 |
+
2025-08-14 22:45:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 241 |
+
2025-08-14 22:45:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 242 |
+
2025-08-14 22:45:00 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 243 |
+
2025-08-14 22:45:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 244 |
+
2025-08-14 22:45:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 245 |
+
2025-08-14 22:45:01 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 246 |
+
2025-08-14 22:45:02 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 247 |
+
2025-08-14 22:45:04 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 248 |
+
2025-08-14 22:46:02 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 249 |
+
2025-08-14 22:46:03 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 250 |
+
2025-08-14 22:50:42 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 251 |
+
2025-08-14 22:51:51 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 252 |
+
2025-08-14 22:51:52 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 253 |
+
2025-08-14 22:51:52 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 254 |
+
2025-08-14 22:51:52 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 255 |
+
2025-08-14 22:51:52 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 256 |
+
2025-08-14 22:51:53 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 257 |
+
2025-08-14 22:51:53 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 258 |
+
2025-08-14 22:51:54 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 259 |
+
2025-08-14 22:51:55 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 260 |
+
2025-08-14 22:56:37 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 261 |
+
2025-08-14 22:56:37 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/gradio_api/startup-events "HTTP/1.1 200 OK"
|
| 262 |
+
2025-08-14 22:56:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 263 |
+
2025-08-14 22:56:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 264 |
+
2025-08-14 22:56:38 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 265 |
+
2025-08-14 22:56:39 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 266 |
+
2025-08-14 22:56:39 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 267 |
+
2025-08-14 22:56:40 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 268 |
+
2025-08-14 22:56:41 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/v3/tunnel-request "HTTP/1.1 200 OK"
|
| 269 |
+
2025-08-14 23:00:02 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 270 |
+
2025-08-14 23:00:03 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 271 |
+
2025-08-14 23:00:13 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 272 |
+
2025-08-14 23:00:15 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 273 |
+
2025-08-14 23:01:27 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 274 |
+
2025-08-14 23:01:28 - src.main - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 275 |
+
2025-08-14 23:10:00 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 276 |
+
2025-08-14 23:10:01 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 277 |
+
2025-08-14 23:10:01 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 278 |
+
2025-08-14 23:10:51 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 279 |
+
2025-08-14 23:24:26 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 280 |
+
2025-08-14 23:24:26 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
| 281 |
+
2025-08-14 23:24:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
| 282 |
+
2025-08-14 23:24:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 283 |
+
2025-08-14 23:24:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 284 |
+
2025-08-14 23:24:27 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 285 |
+
2025-08-14 23:24:28 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 286 |
+
2025-08-14 23:24:28 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 287 |
+
2025-08-14 23:24:29 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 288 |
+
2025-08-14 23:28:09 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 289 |
+
2025-08-14 23:28:09 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
| 290 |
+
2025-08-14 23:28:09 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
|
| 291 |
+
2025-08-14 23:28:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 292 |
+
2025-08-14 23:28:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 293 |
+
2025-08-14 23:28:10 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 294 |
+
2025-08-14 23:28:11 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 295 |
+
2025-08-14 23:28:11 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 296 |
+
2025-08-14 23:28:12 - httpx - INFO - _client.py:1025 - _send_single_request() - HTTP Request: HEAD http://127.0.0.1:7860/ "HTTP/1.1 500 Internal Server Error"
|
| 297 |
+
2025-08-14 23:35:49 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 298 |
+
2025-08-14 23:35:50 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 299 |
+
2025-08-14 23:35:50 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 300 |
+
2025-08-14 23:36:01 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 301 |
+
2025-08-14 23:36:01 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project ABC...
|
| 302 |
+
2025-08-14 23:36:01 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 303 |
+
2025-08-14 23:36:01 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 304 |
+
2025-08-14 23:36:03 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 46 messages to cache
|
| 305 |
+
2025-08-14 23:36:03 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 306 |
+
2025-08-14 23:36:03 - src.app.api.chat - INFO - chat.py:66 - generate_stream() - Graph streaming completed. Accumulated: 82 chars
|
| 307 |
+
2025-08-14 23:36:03 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project ABC...
|
| 308 |
+
2025-08-14 23:36:03 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 309 |
+
2025-08-14 23:36:03 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 310 |
+
2025-08-14 23:36:05 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 48 messages to cache
|
| 311 |
+
2025-08-14 23:36:05 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 312 |
+
2025-08-14 23:36:25 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 313 |
+
2025-08-14 23:44:39 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 314 |
+
2025-08-14 23:44:40 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 315 |
+
2025-08-14 23:44:40 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 316 |
+
2025-08-14 23:46:00 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 317 |
+
2025-08-14 23:46:01 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 318 |
+
2025-08-14 23:46:01 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 319 |
+
2025-08-14 23:46:38 - root - INFO - logging_config.py:111 - setup_logging() - Logging system initialized successfully
|
| 320 |
+
2025-08-14 23:46:39 - __main__ - INFO - main.py:16 - <module>() - Starting Transportation Prediction API v2.0
|
| 321 |
+
2025-08-14 23:46:39 - __main__ - INFO - main.py:40 - <module>() - Starting server on localhost:3454
|
| 322 |
+
2025-08-14 23:48:23 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 323 |
+
2025-08-14 23:48:23 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển...
|
| 324 |
+
2025-08-14 23:48:23 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 325 |
+
2025-08-14 23:48:23 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 326 |
+
2025-08-14 23:48:25 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 50 messages to cache
|
| 327 |
+
2025-08-14 23:48:25 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 328 |
+
2025-08-14 23:48:25 - src.app.api.chat - INFO - chat.py:65 - generate_stream() - Graph streaming completed. Accumulated: 91 chars
|
| 329 |
+
2025-08-14 23:48:25 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển...
|
| 330 |
+
2025-08-14 23:48:25 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 331 |
+
2025-08-14 23:48:25 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 332 |
+
2025-08-14 23:48:27 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 52 messages to cache
|
| 333 |
+
2025-08-14 23:48:27 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 334 |
+
2025-08-14 23:54:19 - transportation - INFO - predict.py:41 - load_models() - All models loaded successfully
|
| 335 |
+
2025-08-15 00:01:27 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 336 |
+
2025-08-15 00:01:28 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project 100-CI-T01 đến Vietnam...
|
| 337 |
+
2025-08-15 00:01:28 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 338 |
+
2025-08-15 00:01:28 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 339 |
+
2025-08-15 00:01:29 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 54 messages to cache
|
| 340 |
+
2025-08-15 00:01:29 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 341 |
+
2025-08-15 00:01:29 - src.app.api.chat - INFO - chat.py:65 - generate_stream() - Graph streaming completed. Accumulated: 63 chars
|
| 342 |
+
2025-08-15 00:01:29 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project 100-CI-T01 đến Vietnam...
|
| 343 |
+
2025-08-15 00:01:29 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 344 |
+
2025-08-15 00:01:29 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 345 |
+
2025-08-15 00:01:30 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 56 messages to cache
|
| 346 |
+
2025-08-15 00:01:30 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 347 |
+
2025-08-15 00:20:58 - src.app.api.chat - INFO - chat.py:41 - generate_stream() - Using LangGraph native streaming with stream_mode='messages'
|
| 348 |
+
2025-08-15 00:20:58 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project 100-CI-T01 với 50kg hàng đến Vietnam...
|
| 349 |
+
2025-08-15 00:20:58 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 350 |
+
2025-08-15 00:20:58 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 351 |
+
2025-08-15 00:21:00 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 58 messages to cache
|
| 352 |
+
2025-08-15 00:21:00 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
| 353 |
+
2025-08-15 00:21:00 - src.app.api.chat - INFO - chat.py:65 - generate_stream() - Graph streaming completed. Accumulated: 64 chars
|
| 354 |
+
2025-08-15 00:21:00 - src.domain.node.input - INFO - input.py:64 - process_input() - Received user message: Dự đoán phương thức vận chuyển cho project 100-CI-T01 với 50kg hàng đến Vietnam...
|
| 355 |
+
2025-08-15 00:21:00 - src.domain.node.input - INFO - input.py:80 - process_input() - Added context from 10 previous conversations
|
| 356 |
+
2025-08-15 00:21:00 - src.domain.node.input - INFO - input.py:82 - process_input() - User message with context saved to state successfully
|
| 357 |
+
2025-08-15 00:21:01 - src.domain.node.save_cache - INFO - save_cache.py:41 - save_cache() - Saved 60 messages to cache
|
| 358 |
+
2025-08-15 00:21:01 - src.domain.node.save_cache - INFO - save_cache.py:85 - process_save() - Conversation saved to cache in role/content format successfully
|
source/requirements.txt
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Web Framework và API
|
| 2 |
+
fastapi==0.116.1
|
| 3 |
+
uvicorn==0.35.0
|
| 4 |
+
python-multipart==0.0.12
|
| 5 |
+
|
| 6 |
+
# Pydantic cho validation và serialization
|
| 7 |
+
pydantic==2.11.7
|
| 8 |
+
pydantic-settings==2.*
|
| 9 |
+
|
| 10 |
+
# Machine Learning Core
|
| 11 |
+
numpy==2.3.*
|
| 12 |
+
scikit-learn==1.7.1
|
| 13 |
+
xgboost==3.0.4
|
| 14 |
+
joblib==1.4.*
|
| 15 |
+
|
| 16 |
+
# LangChain và AI Integration
|
| 17 |
+
langgraph==0.6.*
|
| 18 |
+
langchain==0.3.*
|
| 19 |
+
langchain-core==0.3.*
|
| 20 |
+
langchain-google-genai==2.0.*
|
| 21 |
+
langchain-text-splitters==0.3.*
|
| 22 |
+
|
| 23 |
+
# Google AI
|
| 24 |
+
google-generativeai==0.8.*
|
| 25 |
+
|
| 26 |
+
# Configuration và Environment
|
| 27 |
+
python-dotenv==1.0.*
|
| 28 |
+
typing-extensions==4.12.*
|
| 29 |
+
|
| 30 |
+
# HTTP Requests (if needed for external APIs)
|
| 31 |
+
requests==2.32.*
|
| 32 |
+
aiohttp==3.10.*
|
| 33 |
+
|
| 34 |
+
# Data Processing
|
| 35 |
+
pandas==2.2.*
|
| 36 |
+
|
| 37 |
+
# UI Framework
|
| 38 |
+
gradio==4.44.1
|
| 39 |
+
|
| 40 |
+
# Development và Testing (optional)
|
| 41 |
+
# pytest==8.3.*
|
| 42 |
+
# pytest-asyncio==0.24.*
|
source/requirements_hf.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio==5.5.0
|
| 2 |
+
scikit-learn==1.7.1
|
| 3 |
+
xgboost==3.0.4
|
| 4 |
+
joblib==1.4.4
|
| 5 |
+
pandas==2.2.3
|
| 6 |
+
numpy==2.3.0
|
| 7 |
+
pydantic==2.11.7
|
| 8 |
+
pathlib
|
source/requirements_simple.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn[standard]
|
| 3 |
+
gradio
|
| 4 |
+
numpy
|
| 5 |
+
scikit-learn
|
| 6 |
+
xgboost
|
| 7 |
+
joblib
|
| 8 |
+
requests
|
| 9 |
+
aiohttp
|
| 10 |
+
python-dotenv
|
| 11 |
+
google-generativeai
|
| 12 |
+
pydantic
|
source/src/app/__init__.py
ADDED
|
File without changes
|
source/src/app/api/__init__.py
ADDED
|
File without changes
|
source/src/app/api/chat.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from fastapi import APIRouter, HTTPException
|
| 3 |
+
from fastapi.responses import StreamingResponse
|
| 4 |
+
import json
|
| 5 |
+
import asyncio
|
| 6 |
+
import logging
|
| 7 |
+
from src.domain.transportation_graph import create_transportation_workflow
|
| 8 |
+
from src.state.graph_state import TransportationState
|
| 9 |
+
from src.app.schema.chat import ChatRequest, StreamChunk, FunctionCall
|
| 10 |
+
from src.config.logging_config import get_logger
|
| 11 |
+
|
| 12 |
+
logger = get_logger(__name__)
|
| 13 |
+
router = APIRouter()
|
| 14 |
+
|
| 15 |
+
@router.post("/chat", response_model=None)
async def chat_stream(request: ChatRequest):
    """Streaming chat endpoint backed by LangGraph native streaming (SSE).

    Emits a ``text/event-stream`` of ``StreamChunk`` JSON payloads:
    ``status`` events for lifecycle, ``delta`` events for incremental
    content, a ``final`` event carrying the complete response, and a
    terminating ``[DONE]`` sentinel line.

    Raises:
        HTTPException(500): if the stream cannot be constructed.
    """
    try:
        async def generate_stream():
            """Yield SSE lines produced by the LangGraph workflow."""
            workflow = create_transportation_workflow()
            graph = workflow.compile()

            # Build the initial graph state from the user message.
            from src.state.graph_state import create_initial_state
            initial_state = create_initial_state(request.message)

            # Announce that processing has started.
            chunk = StreamChunk(
                event="status",
                stage="starting",
                message="Đang xử lý câu hỏi..."
            )
            yield f"data: {chunk.model_dump_json()}\n\n"

            try:
                accumulated_text = ""
                final_state = None

                logger.info("Using LangGraph native streaming with stream_mode='messages'")

                # stream_mode='messages' yields (message_chunk, metadata) tuples.
                async for event in graph.astream(initial_state, stream_mode="messages"):
                    logger.debug(f"Graph event: {type(event)}")

                    if isinstance(event, tuple) and len(event) == 2:
                        content_obj, metadata = event

                        # Only forward token chunks emitted by the 'llm' node.
                        if (hasattr(content_obj, 'content') and
                                content_obj.content and
                                metadata.get('langgraph_node') == 'llm'):

                            content = content_obj.content
                            accumulated_text += content
                            chunk_obj = StreamChunk(
                                event="delta",
                                content=content
                            )
                            yield f"data: {chunk_obj.model_dump_json()}\n\n"
                            await asyncio.sleep(0.02)

                logger.info(f"Graph streaming completed. Accumulated: {len(accumulated_text)} chars")

                if not accumulated_text.strip():
                    # Fallback: run the graph once and simulate streaming.
                    # BUGFIX: use the async ainvoke() instead of the blocking
                    # invoke(), which would stall the event loop while the
                    # graph (LLM calls included) runs.
                    logger.info("No streaming content, falling back to invoke")
                    final_state = await graph.ainvoke(initial_state)

                    if 'ai_response' in final_state and final_state['ai_response']:
                        # Word-by-word delivery to emulate token streaming.
                        words = final_state['ai_response'].split()
                        for word in words:
                            accumulated_text += word + " "
                            chunk_obj = StreamChunk(
                                event="delta",
                                content=word + " "
                            )
                            yield f"data: {chunk_obj.model_dump_json()}\n\n"
                            await asyncio.sleep(0.05)
                        accumulated_text = accumulated_text.strip()
                    else:
                        accumulated_text = "Đã xảy ra lỗi khi xử lý yêu cầu."
                else:
                    # Capture the final state (function calls, cache).
                    # BUGFIX: ainvoke() instead of blocking invoke().
                    # NOTE(review): this executes the whole graph a second
                    # time after streaming; consider capturing the final
                    # state from the stream itself to avoid the extra run.
                    try:
                        final_state = await graph.ainvoke(initial_state)
                    except Exception as e:
                        logger.warning(f"Graph invoke for final state failed: {e}")

                # Function-call events are intentionally omitted for now.

                final_response = accumulated_text if accumulated_text.strip() else "Không có phản hồi"

            except Exception as e:
                logger.error(f"Graph streaming error: {e}")
                chunk = StreamChunk(
                    event="status",
                    stage="error",
                    message=f"Streaming Error: {str(e)}"
                )
                yield f"data: {chunk.model_dump_json()}\n\n"
                final_response = "Đã xảy ra lỗi trong quá trình xử lý"

            # Close the stream with the full response and the sentinel.
            final_chunk = StreamChunk(
                event="final",
                content=final_response
            )
            yield f"data: {final_chunk.model_dump_json()}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(
            generate_stream(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Headers": "Cache-Control"
            }
        )

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Streaming failed: {str(e)}")
|
source/src/app/api/predict.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, HTTPException
|
| 2 |
+
from src.app.schema.transportation import TransportationRequest, TransportationResponse
|
| 3 |
+
import pickle
|
| 4 |
+
import joblib
|
| 5 |
+
import numpy as np
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
import logging
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
import warnings
|
| 10 |
+
from sklearn.exceptions import InconsistentVersionWarning
|
| 11 |
+
from src.config.logging_config import get_logger
|
| 12 |
+
|
| 13 |
+
# Suppress sklearn version warning for model compatibility
|
| 14 |
+
warnings.filterwarnings("ignore", category=InconsistentVersionWarning)
|
| 15 |
+
|
| 16 |
+
router = APIRouter()
|
| 17 |
+
logger = get_logger("transportation")
|
| 18 |
+
|
| 19 |
+
class TransportationPredictor:
    """Loads the trained shipment-mode model artifacts from Hugging Face
    and serves predictions.

    Artifacts (XGBoost model, label encoders, scaler) are downloaded once
    at construction time; use get_predictor() for the shared instance.
    """

    def __init__(self):
        self.model = None               # XGBoost classifier
        self.label_encoders = None      # encoders for all columns
        self.shipment_encoders = None   # encoders used for prediction inputs
        self.scaler = None              # weight/freight scaler (currently unused here)
        self.load_models()

    def _download_file(self, url, token=None):
        """Download *url* (optionally with a Bearer token) into a BytesIO."""
        import requests
        headers = {"Authorization": f"Bearer {token}"} if token else {}
        # Bounded timeout so a hung download cannot block startup forever.
        response = requests.get(url, headers=headers, timeout=120)
        response.raise_for_status()
        from io import BytesIO
        return BytesIO(response.content)

    def load_models(self):
        """Load all model artifacts from Hugging Face.

        Raises:
            RuntimeError: if any artifact cannot be downloaded or parsed.
        """
        try:
            from src.config.setting import settings
            base_url = settings.HF_MODEL_BASE_URL
            files = settings.HF_MODEL_FILES
            token = settings.HF_TOKEN
            with warnings.catch_warnings():
                # Artifacts were serialized with an older sklearn version.
                warnings.filterwarnings("ignore", category=InconsistentVersionWarning)
                # SECURITY: pickle/joblib deserialization can execute
                # arbitrary code - only load artifacts from the trusted
                # model repository configured in settings.
                self.model = pickle.load(self._download_file(base_url + files["xgboost_model"], token))
                self.label_encoders = joblib.load(self._download_file(base_url + files["label_encoders"], token))
                self.shipment_encoders = joblib.load(self._download_file(base_url + files["shipment_encoders"], token))
                self.scaler = joblib.load(self._download_file(base_url + files["scaler"], token))
            logger.info("All models loaded successfully from Hugging Face")
        except Exception as e:
            # Chain the original cause for easier debugging.
            raise RuntimeError(f"Không thể load models từ Hugging Face: {e}") from e

    def _encode_safe(self, encoder, value: str) -> int:
        """Encode *value* with *encoder*; return 0 for unknown labels,
        a missing encoder, or any encoder error (as a plain Python int)."""
        try:
            if encoder and value in encoder.classes_:
                result = encoder.transform([value])[0]
                return int(result)  # numpy int -> Python int
            return 0
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt / SystemExit.
            return 0

    def predict_shipment_mode(self, request: TransportationRequest) -> TransportationResponse:
        """Predict the optimal shipment mode for *request*.

        Missing weight/freight values are estimated from quantity and
        pack price before the model is queried.

        Raises:
            HTTPException(500): if prediction fails for any reason.
        """
        try:
            # Estimate weight and freight when not supplied.
            weight = request.weight_kg or (request.line_item_quantity or 100) * 0.1
            freight = request.freight_cost_usd or max(weight * 3, request.pack_price * 0.04, 50)

            # Encode categorical features (plain Python ints).
            project_encoded = self._encode_safe(self.shipment_encoders.get('Project Code'), request.project_code)
            country_encoded = self._encode_safe(self.shipment_encoders.get('Country'), request.country)
            vendor_encoded = self._encode_safe(self.shipment_encoders.get('Vendor'), request.vendor)

            # Delivery date -> ordinal; fall back to today on bad/missing input.
            try:
                date_ordinal = datetime.strptime(request.delivery_date, "%Y-%m-%d").toordinal() if request.delivery_date else datetime.now().toordinal()
            except (ValueError, TypeError):
                # BUGFIX: narrowed from a bare `except:`.
                date_ordinal = datetime.now().toordinal()

            # Feature order must match the training pipeline.
            features = [0, project_encoded, country_encoded, request.pack_price, vendor_encoded, freight, weight, date_ordinal]

            # Build the input row once instead of twice.
            feature_row = np.array(features).reshape(1, -1)
            prediction = self.model.predict(feature_row)
            probabilities = self.model.predict_proba(feature_row)[0]

            classes = ['Air', 'Air Charter', 'Ocean', 'Truck']
            predicted_idx = int(prediction[0])
            predicted_mode = classes[predicted_idx]
            confidence = float(probabilities[predicted_idx])

            # Remaining classes, sorted by descending probability.
            alternatives = [
                {'mode': classes[i], 'probability': float(probabilities[i])}
                for i in range(len(classes)) if i != predicted_idx
            ]
            alternatives.sort(key=lambda x: x['probability'], reverse=True)

            return TransportationResponse(
                predicted_shipment_mode=predicted_mode,
                confidence_score=confidence,
                alternative_modes=alternatives,
                estimated_weight_kg=float(weight) if not request.weight_kg else None,
                estimated_freight_cost_usd=float(freight) if not request.freight_cost_usd else None,
                encoded_features={
                    'Project_Code': int(project_encoded),
                    'Country': int(country_encoded),
                    'Vendor': int(vendor_encoded)
                },
                processing_notes=[]
            )

        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Prediction failed: {e}")
|
| 118 |
+
|
| 119 |
+
# Lazily-created singleton predictor (model download is expensive).
predictor = None

def get_predictor():
    """Return the shared TransportationPredictor, creating it on first use."""
    global predictor
    if predictor is None:
        predictor = TransportationPredictor()
    return predictor
|
| 127 |
+
|
| 128 |
+
@router.post('/predict-transportation', response_model=TransportationResponse)
def predict_transportation(request: TransportationRequest):
    """Predict the optimal shipment mode for the given request."""
    service = get_predictor()
    return service.predict_shipment_mode(request)
|
| 132 |
+
|
| 133 |
+
@router.get('/transportation-options')
def get_transportation_options():
    """List valid/sample values for the prediction input fields."""
    try:
        encoders = get_predictor().shipment_encoders

        def _sample(column):
            # First ten known labels for the column, if encoders are loaded.
            return list(encoders[column].classes_[:10]) if encoders else []

        return {
            "shipment_modes": ["Air", "Air Charter", "Ocean", "Truck"],
            "sample_vendors": _sample('Vendor'),
            "sample_countries": _sample('Country'),
            "sample_projects": _sample('Project Code'),
        }
    except Exception as e:
        # Best-effort endpoint: surface the error in the payload.
        return {"error": str(e)}
|
source/src/app/schema/__init__.py
ADDED
|
File without changes
|
source/src/app/schema/chat.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Chat API schemas for transportation application
|
| 3 |
+
"""
|
| 4 |
+
from pydantic import BaseModel, Field
|
| 5 |
+
from typing import List, Dict, Any, Optional
|
| 6 |
+
|
| 7 |
+
class ChatRequest(BaseModel):
    """Schema for an incoming chat request."""
    # Pydantic v2 deprecated Field(example=...); json_schema_extra keeps the
    # OpenAPI example without triggering deprecation warnings.
    message: str = Field(
        ...,
        description="Tin nhắn từ người dùng",
        json_schema_extra={"example": "Dự đoán phương thức vận chuyển cho project ABC"},
    )
|
| 11 |
+
class FunctionCall(BaseModel):
    """Schema for a single function call performed during a chat turn."""
    function_name: str = Field(..., description="Tên function được gọi")
    arguments: Dict[str, Any] = Field(..., description="Tham số của function")
    result: Dict[str, Any] = Field(..., description="Kết quả của function")
+
|
| 17 |
+
class ChatResponse(BaseModel):
    """Schema for a non-streaming chat response."""
    response: str = Field(..., description="Phản hồi từ AI")
    function_calls: List[FunctionCall] = Field(default_factory=list, description="Các function calls đã thực hiện")
    status: str = Field(default="success", description="Trạng thái response")
    error_message: Optional[str] = Field(None, description="Thông báo lỗi nếu có")
|
| 23 |
+
|
| 24 |
+
class StreamChunk(BaseModel):
    """Schema for one SSE streaming chunk (compact format)."""
    # event is one of: "status", "delta", "final"
    event: str = Field(..., description="Loại event: status, delta, final")
    stage: Optional[str] = Field(None, description="Stage cho status event: starting, processing, completed")
    message: Optional[str] = Field(None, description="Message cho status event")
    content: Optional[str] = Field(None, description="Nội dung cho delta và final event")
|
source/src/app/schema/transportation.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
from pydantic import BaseModel, Field
|
| 3 |
+
|
| 4 |
+
class TransportationRequest(BaseModel):
    """Input schema for the shipment-mode prediction endpoint.

    Pydantic v2 deprecated Field(example=...); examples are supplied via
    json_schema_extra so the OpenAPI docs keep them without warnings.
    """

    # Required fields
    project_code: str = Field(..., description="Mã dự án",
                              json_schema_extra={"example": "100-CI-T01"})
    country: str = Field(..., description="Quốc gia đích",
                         json_schema_extra={"example": "Vietnam"})
    pack_price: float = Field(..., description="Giá mỗi gói (USD)",
                              json_schema_extra={"example": 50.0})
    vendor: str = Field(..., description="Nhà cung cấp",
                        json_schema_extra={"example": "ABBOTT LABORATORIES"})

    # Optional fields (estimated by the service when omitted)
    weight_kg: Optional[float] = Field(None, description="Khối lượng (kg)",
                                       json_schema_extra={"example": 25.0})
    freight_cost_usd: Optional[float] = Field(None, description="Chi phí vận chuyển (USD)",
                                              json_schema_extra={"example": 500.0})
    delivery_date: Optional[str] = Field(None, description="Ngày giao hàng (YYYY-MM-DD)",
                                         json_schema_extra={"example": "2025-08-20"})
    line_item_quantity: Optional[float] = Field(100.0, description="Số lượng để ước tính weight",
                                                json_schema_extra={"example": 100.0})
| 19 |
+
class TransportationResponse(BaseModel):
    """Output schema for the shipment-mode prediction endpoint."""

    predicted_shipment_mode: str = Field(..., description="Phương thức vận chuyển đề xuất")
    confidence_score: float = Field(..., description="Độ tin cậy (0-1)")
    alternative_modes: list = Field(..., description="Các phương thức khác")

    # Supplementary information (populated only when values were estimated)
    estimated_weight_kg: Optional[float] = Field(None, description="Khối lượng ước tính")
    estimated_freight_cost_usd: Optional[float] = Field(None, description="Chi phí ước tính")
    encoded_features: Optional[dict] = Field(None, description="Features encoded")
    processing_notes: Optional[list] = Field(None, description="Ghi chú")
|
source/src/config/__init__.py
ADDED
|
File without changes
|
source/src/config/logging_config.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Logging Configuration Module
|
| 3 |
+
Cấu hình logging tập trung cho toàn bộ ứng dụng
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
import logging.handlers
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from typing import Optional
|
| 11 |
+
|
| 12 |
+
from .setting import settings
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class LoggerSetup:
    """Central logging configuration for the application.

    Installs console, rotating-file, error-file and daily-file handlers
    on the root logger. setup_logging() is idempotent.
    """

    def __init__(self):
        # logs/ directory at the project root (.../src/config -> root)
        self.log_dir = Path(__file__).resolve().parents[2] / "logs"
        self.log_dir.mkdir(exist_ok=True)
        self.setup_done = False

    def setup_logging(self,
                      log_level: Optional[str] = None,
                      enable_file_logging: bool = True,
                      enable_console_logging: bool = True) -> None:
        """
        Configure logging for the whole application.

        Args:
            log_level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL);
                defaults to settings.LOG_LEVEL when omitted.
            enable_file_logging: Write logs to files under logs/.
            enable_console_logging: Echo logs to the console.
        """
        if self.setup_done:
            return

        # Fall back to the configured level from settings.
        if log_level is None:
            log_level = settings.LOG_LEVEL

        # Configure the root logger.
        root_logger = logging.getLogger()
        root_logger.setLevel(getattr(logging, log_level.upper()))

        # Remove any pre-existing handlers so reconfiguration is clean.
        for handler in root_logger.handlers[:]:
            root_logger.removeHandler(handler)

        # Log message formats.
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )

        detailed_formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno)d - %(funcName)s() - %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )

        # Console handler
        if enable_console_logging:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(getattr(logging, log_level.upper()))
            console_handler.setFormatter(formatter)
            root_logger.addHandler(console_handler)

        # File handlers
        if enable_file_logging:
            # General log file (size-based rotation).
            general_log_file = self.log_dir / "transportation.log"
            file_handler = logging.handlers.RotatingFileHandler(
                general_log_file,
                maxBytes=settings.LOG_ROTATION_SIZE,  # 10MB
                backupCount=settings.LOG_BACKUP_COUNT,
                encoding='utf-8'
            )
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(detailed_formatter)
            root_logger.addHandler(file_handler)

            # Error log file (errors and above only).
            error_log_file = self.log_dir / "errors.log"
            error_handler = logging.handlers.RotatingFileHandler(
                error_log_file,
                maxBytes=settings.LOG_ROTATION_SIZE,  # 10MB
                backupCount=3,
                encoding='utf-8'
            )
            error_handler.setLevel(logging.ERROR)
            error_handler.setFormatter(detailed_formatter)
            root_logger.addHandler(error_handler)

            # Daily log file (one file per day).
            # BUGFIX: use a static base name - TimedRotatingFileHandler
            # appends the date suffix itself on rollover. Embedding
            # today's date in the base name produced doubled-date files
            # like "daily_20250814.log.2025-08-14" and a new base file
            # on every restart.
            daily_log_file = self.log_dir / "daily.log"
            daily_handler = logging.handlers.TimedRotatingFileHandler(
                daily_log_file,
                when='midnight',
                interval=1,
                backupCount=settings.LOG_RETENTION_DAYS,  # keep 30 days
                encoding='utf-8'
            )
            daily_handler.setLevel(logging.DEBUG)
            daily_handler.setFormatter(detailed_formatter)
            root_logger.addHandler(daily_handler)

        # Per-module logger levels.
        self._configure_specific_loggers()

        self.setup_done = True
        logging.info("Logging system initialized successfully")

    def _configure_specific_loggers(self):
        """Set per-module log levels (quiet noisy third-party loggers)."""

        # FastAPI and Uvicorn
        logging.getLogger("uvicorn").setLevel(logging.INFO)
        logging.getLogger("uvicorn.access").setLevel(logging.WARNING)
        logging.getLogger("fastapi").setLevel(logging.INFO)

        # Transportation specific loggers
        logging.getLogger("transportation").setLevel(logging.INFO)
        logging.getLogger("prediction").setLevel(logging.INFO)
        logging.getLogger("chat").setLevel(logging.INFO)

        # External libraries
        logging.getLogger("requests").setLevel(logging.WARNING)
        logging.getLogger("urllib3").setLevel(logging.WARNING)
        logging.getLogger("google").setLevel(logging.WARNING)

    def get_logger(self, name: str) -> logging.Logger:
        """
        Return a logger with the given name, initializing logging lazily.

        Args:
            name: Logger name (usually __name__).

        Returns:
            Logger instance.
        """
        if not self.setup_done:
            self.setup_logging()

        return logging.getLogger(name)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# Global logger setup instance shared by the module-level helpers.
logger_setup = LoggerSetup()

def get_logger(name: str) -> logging.Logger:
    """
    Convenience wrapper for fetching a configured logger.

    Usage:
        from src.config.logging_config import get_logger
        logger = get_logger(__name__)
    """
    return logger_setup.get_logger(name)
|
| 159 |
+
|
| 160 |
+
def setup_logging(**kwargs):
    """
    Convenience wrapper to initialize the logging system.

    Usage:
        from src.config.logging_config import setup_logging
        setup_logging(log_level="DEBUG")
    """
    return logger_setup.setup_logging(**kwargs)
|
source/src/config/setting.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import os
|
| 3 |
+
from typing import Optional
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from dotenv import load_dotenv
|
| 6 |
+
|
| 7 |
+
# Load .env file từ project root
|
| 8 |
+
project_root = Path(__file__).resolve().parents[2]
|
| 9 |
+
env_path = project_root / ".env"
|
| 10 |
+
load_dotenv(env_path)
|
| 11 |
+
|
| 12 |
+
class Settings:
    """Application configuration, read from environment variables / .env."""

    # API Settings (base URL the UI uses to reach the backend)
    API_BASE_URL: str = os.getenv("API_BASE_URL", "http://localhost:3454/api")

    # Gemini AI Settings
    GEMINI_API_KEY: Optional[str] = os.getenv("GEMINI_API_KEY")

    # Model URLs (Hugging Face)
    HF_MODEL_BASE_URL: str = "https://huggingface.co/NGOC1712/transportation-models/resolve/main/"
    HF_MODEL_FILES: dict = {
        "xgboost_model": "xgboost_model.pkl",
        "label_encoders": "all_label_encoders.joblib",
        "shipment_encoders": "shipment_label_encoders.pkl",
        "scaler": "scaler_weight_freight.pkl"
    }
    # NOTE(review): the HF token is read from the "ACCESS" env var -
    # confirm this matches the deployment's secret name.
    HF_TOKEN: Optional[str] = os.getenv("ACCESS")
    # Directory holding the JSON function declarations for LLM tools.
    FUNCTIONS_DIR: Path = Path(__file__).resolve().parents[1] / "domain" / "functions"

    # Logging Configuration
    LOG_LEVEL: str = "INFO"
    LOG_FILE_ENABLED: bool = True
    LOG_CONSOLE_ENABLED: bool = True
    LOG_ROTATION_SIZE: int = 10 * 1024 * 1024  # 10MB
    LOG_BACKUP_COUNT: int = 5
    LOG_RETENTION_DAYS: int = 30

    @classmethod
    def get_predict_url(cls) -> str:
        """Full URL of the predict endpoint."""
        return f"{cls.API_BASE_URL}/predict-transportation"

    @classmethod
    def get_chat_url(cls) -> str:
        """Full URL of the chat endpoint."""
        return f"{cls.API_BASE_URL}/chat"

    @classmethod
    def validate_gemini_key(cls) -> bool:
        """Return True when a non-empty Gemini API key is configured."""
        return cls.GEMINI_API_KEY is not None and len(cls.GEMINI_API_KEY.strip()) > 0

    @classmethod
    def get_gemini_config(cls) -> dict:
        """Return the Gemini client configuration dict."""
        return {
            "api_key": cls.GEMINI_API_KEY
        }

# Global settings instance
settings = Settings()
|
source/src/database/__init__.py
ADDED
|
File without changes
|
source/src/database/db.py
ADDED
|
File without changes
|
source/src/domain/__init__.py
ADDED
|
File without changes
|
source/src/domain/functions/get_options_function.json
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "get_transportation_options",
|
| 3 |
+
"description": "Lấy danh sách các tùy chọn có sẵn cho việc dự đoán phương thức vận chuyển. Trả về các phương thức vận chuyển có thể, danh sách mẫu của nhà cung cấp, quốc gia và mã dự án để người dùng tham khảo khi sử dụng function predict_transportation_mode.",
|
| 4 |
+
"parameters": {
|
| 5 |
+
"type": "object",
|
| 6 |
+
"properties": {}
|
| 7 |
+
}
|
| 8 |
+
}
|
source/src/domain/functions/predict_function.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "predict_transportation_mode",
|
| 3 |
+
"description": "Dự đoán phương thức vận chuyển tối ưu dựa trên thông tin dự án, quốc gia, giá cả và nhà cung cấp. API sẽ gợi ý phương thức vận chuyển phù hợp nhất (Air, Air Charter, Ocean, Truck) cùng với độ tin cậy và các lựa chọn thay thế. Sử dụng machine learning model được train từ dữ liệu historical logistics để đưa ra dự đoán chính xác.",
|
| 4 |
+
"parameters": {
|
| 5 |
+
"type": "object",
|
| 6 |
+
"properties": {
|
| 7 |
+
"project_code": {
|
| 8 |
+
"type": "string",
|
| 9 |
+
"description": "Mã dự án cần vận chuyển. Ví dụ: '100-CI-T01', '200-DH-M05'. Đây là identifier duy nhất cho mỗi dự án logistics."
|
| 10 |
+
},
|
| 11 |
+
"country": {
|
| 12 |
+
"type": "string",
|
| 13 |
+
"description": "Quốc gia đích cần giao hàng. Ví dụ: 'Vietnam', 'Nigeria', 'Kenya'. Sử dụng tên tiếng Anh đầy đủ của quốc gia."
|
| 14 |
+
},
|
| 15 |
+
"pack_price": {
|
| 16 |
+
"type": "number",
|
| 17 |
+
"description": "Giá trị mỗi gói hàng tính bằng USD. Ví dụ: 50.0, 120.5. Giá trị này ảnh hưởng đến quyết định phương thức vận chuyển."
|
| 18 |
+
},
|
| 19 |
+
"vendor": {
|
| 20 |
+
"type": "string",
|
| 21 |
+
"description": "Tên nhà cung cấp hoặc nhà sản xuất. Ví dụ: 'ABBOTT LABORATORIES', 'PFIZER INC'. Sử dụng tên đầy đủ chính thức của công ty."
|
| 22 |
+
},
|
| 23 |
+
"weight_kg": {
|
| 24 |
+
"type": "number",
|
| 25 |
+
"description": "Khối lượng thực tế của lô hàng tính bằng kg. Không bắt buộc - hệ thống sẽ ước tính dựa trên line_item_quantity nếu không có. Ví dụ: 25.0, 100.5"
|
| 26 |
+
},
|
| 27 |
+
"freight_cost_usd": {
|
| 28 |
+
"type": "number",
|
| 29 |
+
"description": "Chi phí vận chuyển dự kiến tính bằng USD. Không bắt buộc - hệ thống sẽ ước tính dựa trên weight và pack_price nếu không có. Ví dụ: 500.0, 1200.75"
|
| 30 |
+
},
|
| 31 |
+
"delivery_date": {
|
| 32 |
+
"type": "string",
|
| 33 |
+
"description": "Ngày giao hàng mong muốn theo định dạng YYYY-MM-DD. Không bắt buộc - sử dụng ngày hiện tại nếu không có. Ví dụ: '2025-08-20', '2025-12-15'"
|
| 34 |
+
},
|
| 35 |
+
"line_item_quantity": {
|
| 36 |
+
"type": "number",
|
| 37 |
+
"description": "Số lượng sản phẩm để hỗ trợ ước tính khối lượng khi weight_kg không có. Mặc định là 100. Ví dụ: 100, 500, 1000"
|
| 38 |
+
}
|
| 39 |
+
},
|
| 40 |
+
"required": ["project_code", "country", "pack_price", "vendor"]
|
| 41 |
+
}
|
| 42 |
+
}
|
source/src/domain/node/call_llm.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
File call_llm.py - LLM node với function calling
|
| 3 |
+
"""
|
| 4 |
+
import json
|
| 5 |
+
|
| 6 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 7 |
+
from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
|
| 8 |
+
from langchain_core.tools import tool
|
| 9 |
+
|
| 10 |
+
from src.state.graph_state import TransportationState
|
| 11 |
+
from src.config.setting import settings
|
| 12 |
+
|
| 13 |
+
@tool
def predict_transportation_mode(country: str, pack_price: float, project_code: str, vendor: str) -> dict:
    """Dự đoán phương thức vận chuyển tối ưu"""
    # Imported lazily inside the tool — presumably to avoid an import cycle
    # with the API layer (TODO confirm).
    from src.app.api.predict import predict_transportation
    from src.app.schema.transportation import TransportationRequest

    req = TransportationRequest(
        country=country,
        pack_price=pack_price,
        project_code=project_code,
        vendor=vendor,
    )
    # Delegate to the shared prediction endpoint and hand back a plain dict.
    return predict_transportation(req).dict()
@tool
def get_transportation_options() -> dict:
    """Lấy danh sách options vận chuyển"""
    # Lazy import, aliased because the API function shares this tool's name.
    from src.app.api.predict import get_transportation_options as _api_get_options
    return _api_get_options()
class CallLLMNode:
    """LangGraph node that answers the user via Gemini, routing through the
    transportation tools (predict / list options) when the model asks for them."""

    def __init__(self):
        # Base model produces the final natural-language answer; the
        # tool-bound variant is used for the first pass so Gemini can
        # request function calls.
        self.llm = ChatGoogleGenerativeAI(
            model="gemini-1.5-pro",
            google_api_key=settings.GEMINI_API_KEY
        )
        self.llm_with_tools = self.llm.bind_tools(
            [predict_transportation_mode, get_transportation_options]
        )

    def _create_system_message(self) -> str:
        """Return the fixed system prompt describing both tools and the routing rules."""
        return """Bạn là chuyên gia logistics và vận chuyển. Bạn có 2 functions:

1. predict_transportation_mode(country, pack_price, project_code, vendor)
   - Dự đoán phương thức vận chuyển tối ưu
   - Cần đủ 4 tham số

2. get_transportation_options()
   - Lấy danh sách options có sẵn
   - Không cần tham số

QUY TẮC:
- "dự đoán", "tối ưu", "nên chọn" → gọi predict_transportation_mode
- "options", "danh sách", "có gì" → gọi get_transportation_options
- Thiếu thông tin → hỏi ngược ngay
- Trả lời luật/quy định từ kiến thức"""

    def __call__(self, state: TransportationState) -> TransportationState:
        """Run one LLM turn: optionally execute requested tools, then store the answer."""
        try:
            convo = [
                SystemMessage(content=self._create_system_message()),
                HumanMessage(content=state["user_message"]),
            ]
            first_reply = self.llm_with_tools.invoke(convo)

            if not first_reply.tool_calls:
                # Plain answer — nothing to execute.
                state["ai_response"] = first_reply.content
                print("ℹ️ No function calls needed")
            else:
                print(f"✅ Function calls detected: {len(first_reply.tool_calls)}")

                tool_replies = []
                for call in first_reply.tool_calls:
                    name = call["name"]
                    args = call["args"]

                    print(f"🔧 Calling {name} with args: {args}")

                    # Dispatch to the matching tool implementation.
                    if name == "predict_transportation_mode":
                        outcome = predict_transportation_mode.invoke(args)
                    elif name == "get_transportation_options":
                        outcome = get_transportation_options.invoke({})
                    else:
                        outcome = {"error": f"Unknown function: {name}"}

                    print(f"📊 Result: {outcome}")

                    # Track every executed call on the state for caching/audit.
                    state["function_calls_made"].append({
                        "function_name": name,
                        "arguments": args,
                        "result": outcome,
                    })

                    tool_replies.append(ToolMessage(
                        content=json.dumps(outcome),
                        tool_call_id=call["id"],
                    ))

                # Second pass (without tools) turns the raw results into prose.
                follow_up = self.llm.invoke(convo + [first_reply] + tool_replies)
                state["ai_response"] = follow_up.content
                print("✅ Function calling completed successfully")

            state["current_step"] = "completed"
            return state

        except Exception as e:
            print(f"❌ Error in LLM call: {e}")
            state["error_message"] = f"LLM Error: {str(e)}"
            state["current_step"] = "error"
            return state
+
def create_call_llm_node() -> CallLLMNode:
    """Build a fresh CallLLMNode for wiring into the workflow graph."""
    return CallLLMNode()
|
source/src/domain/node/input.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Input Node - Node đầu tiên để nhận tin nhắn từ người dùng với conversation cache
|
| 3 |
+
"""
|
| 4 |
+
import json
|
| 5 |
+
import logging
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from src.state.graph_state import TransportationState
|
| 8 |
+
from src.config.logging_config import get_logger
|
| 9 |
+
|
| 10 |
+
logger = get_logger(__name__)
|
| 11 |
+
|
| 12 |
+
class InputNode:
    """First graph node: loads recent conversation history from the JSON cache
    and prepends a short context summary to the incoming user message."""

    def __init__(self):
        self.name = "input_node"
        # conversation_cache.json lives at the project root, three directories
        # above this file (source/conversation_cache.json).
        self.cache_file = Path(__file__).resolve().parents[3] / "conversation_cache.json"

    def load_conversation_history(self) -> list:
        """Return the last 10 cached messages (5 user/assistant pairs), or [] on any failure.

        Errors (missing file is fine; corrupt JSON is logged) never propagate —
        the workflow should still run without history.
        """
        try:
            if self.cache_file.exists():
                with open(self.cache_file, 'r', encoding='utf-8') as f:
                    conversations = json.load(f)
                return conversations[-10:]  # last 10 messages = 5 pairs
            return []
        except Exception as e:
            logger.error(f"Error loading conversation history: {e}")
            return []

    def get_context_from_history(self, conversations: list) -> str:
        """Build a compact context string from up to the 3 most recent pairs.

        Messages are expected in strict alternating order (user, assistant, ...);
        pairs that do not match that order are skipped.
        """
        if not conversations:
            return ""

        context = "Previous conversations for context:\n"

        # Collect well-formed (user, assistant) pairs.
        pairs = []
        for i in range(0, len(conversations) - 1, 2):
            user_msg = conversations[i]
            assistant_msg = conversations[i + 1]
            if user_msg.get('role') == 'user' and assistant_msg.get('role') == 'assistant':
                pairs.append((user_msg.get('content', ''), assistant_msg.get('content', '')))

        # Show only the last 3 pairs, each side truncated to 50 characters.
        for i, (user_content, assistant_content) in enumerate(pairs[-3:], 1):
            user_short = user_content[:50] + "..." if len(user_content) > 50 else user_content
            assistant_short = assistant_content[:50] + "..." if len(assistant_content) > 50 else assistant_content
            context += f"{i}. User: {user_short} → AI: {assistant_short}\n"

        return context + "\nCurrent conversation:\n"

    def process_input(self, state: TransportationState) -> TransportationState:
        """Normalize the user message and attach conversation context.

        Args:
            state: Current state with ``user_message`` set by the caller.

        Returns:
            Updated state; ``user_message`` may now carry a context prefix,
            ``current_step`` advances to ``"llm_processing"``.
        """
        # Bug fix: strip once and use the stripped text everywhere.  Previously
        # the stripped value was stored and then immediately overwritten with
        # context + the UN-stripped original, discarding the normalization.
        user_message = state["user_message"].strip()
        logger.info(f"Received user message: {user_message[:100]}...")

        try:
            # Load conversation history so the LLM sees prior turns.
            conversation_history = self.load_conversation_history()
            state["conversation_cache"] = conversation_history

            state["user_message"] = user_message
            state["current_step"] = "llm_processing"
            state["error_message"] = None

            # Prepend a short context summary when history exists.
            if conversation_history:
                context = self.get_context_from_history(conversation_history)
                state["user_message"] = context + user_message
                logger.info(f"Added context from {len(conversation_history)} previous conversations")

            logger.info("User message with context saved to state successfully")

        except Exception as e:
            error_msg = f"Error processing user input: {str(e)}"
            logger.error(error_msg)
            state["error_message"] = error_msg

        return state

    def __call__(self, state: TransportationState) -> TransportationState:
        """Callable interface cho LangGraph"""
        return self.process_input(state)
+
# Factory function để tạo input node
|
| 96 |
+
def create_input_node() -> InputNode:
    """Instantiate the input node used at the start of the workflow."""
    return InputNode()
|
source/src/domain/node/save_cache.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Save Cache Node - Node cuối để lưu conversation vào cache
|
| 3 |
+
"""
|
| 4 |
+
import json
|
| 5 |
+
import logging
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
|
| 9 |
+
from src.state.graph_state import TransportationState
|
| 10 |
+
from src.config.logging_config import get_logger
|
| 11 |
+
|
| 12 |
+
logger = get_logger(__name__)
|
| 13 |
+
|
| 14 |
+
class SaveCacheNode:
    """Final graph node: appends the completed user/assistant exchange to the
    on-disk JSON conversation cache (role/content format)."""

    def __init__(self):
        self.name = "save_cache_node"
        # Same cache file the InputNode reads from (project root).
        self.cache_file = Path(__file__).resolve().parents[3] / "conversation_cache.json"

    def load_cache(self) -> list:
        """Load the full conversation cache, or [] if missing or unreadable."""
        try:
            if self.cache_file.exists():
                with open(self.cache_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            return []
        except Exception as e:
            logger.error(f"Error loading cache: {e}")
            return []

    def save_cache(self, conversations: list):
        """Persist the cache, keeping only the most recent 100 messages (50 pairs)."""
        try:
            if len(conversations) > 100:
                conversations = conversations[-100:]

            with open(self.cache_file, 'w', encoding='utf-8') as f:
                json.dump(conversations, f, ensure_ascii=False, indent=2)
            logger.info(f"Saved {len(conversations)} messages to cache")
        except Exception as e:
            logger.error(f"Error saving cache: {e}")

    def process_save(self, state: TransportationState) -> TransportationState:
        """Append the current exchange to the cache and refresh the in-state window.

        Args:
            state: Current state with ``user_message`` and ``ai_response``.

        Returns:
            Updated state; ``conversation_cache`` holds the last 20 messages.
        """
        try:
            if state["user_message"] and state["ai_response"]:
                conversations = self.load_cache()

                # Fix: use ONE timestamp for both halves of the exchange —
                # previously each half called datetime.now() separately and
                # the pair carried slightly different timestamps.
                now = datetime.now().isoformat()
                # Normalize once instead of repeating the None-check twice.
                calls = state["function_calls_made"] or []

                conversations.extend([
                    {
                        "role": "user",
                        "content": state["user_message"],
                        "timestamp": now
                    },
                    {
                        "role": "assistant",
                        "content": state["ai_response"],
                        "timestamp": now,
                        "function_calls": calls,
                        "has_functions": len(calls) > 0
                    }
                ])

                self.save_cache(conversations)

                # Keep a short rolling window in state for the next turn.
                state["conversation_cache"] = conversations[-20:]

                logger.info("Conversation saved to cache in role/content format successfully")
            else:
                logger.warning("No complete conversation to save")

        except Exception as e:
            error_msg = f"Error saving conversation to cache: {str(e)}"
            logger.error(error_msg)
            state["error_message"] = error_msg

        return state

    def __call__(self, state: TransportationState) -> TransportationState:
        """Callable interface for LangGraph"""
        return self.process_save(state)
+
def create_save_cache_node() -> SaveCacheNode:
    """Instantiate the cache-saving node placed at the end of the workflow."""
    return SaveCacheNode()
|
| 103 |
+
|
source/src/domain/transportation_graph.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langgraph.graph import StateGraph, START, END
|
| 2 |
+
from src.state.graph_state import TransportationState
|
| 3 |
+
from src.domain.node.input import create_input_node
|
| 4 |
+
from src.domain.node.call_llm import create_call_llm_node
|
| 5 |
+
from src.domain.node.save_cache import create_save_cache_node
|
| 6 |
+
|
| 7 |
+
def create_transportation_workflow() -> StateGraph:
    """Assemble the linear input → llm → save_cache LangGraph workflow."""
    graph = StateGraph(TransportationState)

    # Register each processing node under a short name.
    graph.add_node("input", create_input_node())
    graph.add_node("llm", create_call_llm_node())
    graph.add_node("save_cache", create_save_cache_node())

    # Strictly sequential pipeline — no branching.
    for origin, target in [
        (START, "input"),
        ("input", "llm"),
        ("llm", "save_cache"),
        ("save_cache", END),
    ]:
        graph.add_edge(origin, target)

    return graph

# Compiled graph exported for the API layer.
app = create_transportation_workflow().compile()
|
source/src/main.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI
|
| 2 |
+
from src.app.api.predict import router as transportation_router
|
| 3 |
+
from src.app.api.chat import router as chat_router
|
| 4 |
+
from src.config.logging_config import setup_logging, get_logger
|
| 5 |
+
|
| 6 |
+
# Setup logging trước khi khởi tạo app
|
| 7 |
+
# Logging must be configured before any module-level logger is created.
setup_logging()
logger = get_logger(__name__)

# FastAPI application exposing the prediction and chat endpoints.
app = FastAPI(
    title="Transportation Prediction API",
    description="API dự đoán phương thức vận chuyển dùng mô hình XGBoost với đầu vào đơn giản",
    version="2.0.0"
)

logger.info("Starting Transportation Prediction API v2.0")


# Both routers are mounted under the /api prefix.
app.include_router(transportation_router, prefix="/api", tags=["transportation"])
app.include_router(chat_router, prefix="/api", tags=["chat"])

@app.get("/")
def root():
    """Landing endpoint: returns API metadata and a map of the available routes."""
    return {
        "message": "Transportation Prediction API v2.0",
        "docs": "/docs",
        "endpoints": {
            "predict_transportation": "/api/predict-transportation",
            "transportation_options": "/api/transportation-options",
            "legacy_predict": "/api/predict",
            "chat": "/api/chat (streaming)"
        },
        "description": "Sử dụng /api/predict-transportation với đầu vào đơn giản để dự đoán phương thức vận chuyển, hoặc /api/chat để chat với AI (streaming response)"
    }

# Only run the dev server when this file is executed directly
# (in Docker, uvicorn is launched by start.sh on port 3454 instead).
if __name__ == "__main__":
    import uvicorn
    logger.info("Starting server on localhost:3454")
    uvicorn.run(app, host="localhost", port=3454)
|
source/src/state/graph_state.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Graph State với conversation cache support - LangGraph compatible
|
| 3 |
+
"""
|
| 4 |
+
from typing import Optional, Dict, Any, List
|
| 5 |
+
from typing_extensions import TypedDict
|
| 6 |
+
|
| 7 |
+
class TransportationState(TypedDict):
    """Mutable state dict shared by every node in the transportation LangGraph workflow."""
    # Input/Output
    user_message: str  # incoming user text; the input node may prepend cached context
    ai_response: str  # final answer produced by the LLM node

    # Conversation cache
    conversation_cache: List[Dict[str, Any]]  # recent role/content messages loaded from disk

    # Function tracking
    function_calls_made: List[Dict[str, Any]]  # {"function_name", "arguments", "result"} records

    # Error handling
    error_message: Optional[str]  # set by any node on failure; None when healthy

    # Status
    current_step: str  # progress marker: "input", "llm_processing", "completed", "error"
+
def create_initial_state(user_message: str = "") -> TransportationState:
    """Return a new TransportationState populated with default values."""
    initial: TransportationState = {
        "user_message": user_message,
        "ai_response": "",
        "conversation_cache": [],
        "function_calls_made": [],
        "error_message": None,
        "current_step": "input",
    }
    return initial
|
| 34 |
+
|
| 35 |
+
def add_function_call(state: TransportationState, function_name: str, arguments: Dict[str, Any], result: Dict[str, Any]) -> TransportationState:
    """Record one executed tool call on the state and return the state."""
    record = {
        "function_name": function_name,
        "arguments": arguments,
        "result": result,
    }
    state["function_calls_made"].append(record)
    return state
|
| 43 |
+
|
| 44 |
+
def set_error(state: TransportationState, error: str) -> TransportationState:
    """Attach an error message to the state and return the state."""
    state["error_message"] = error
    return state
|
source/start.bat
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@echo off
REM Windows start script: launches the FastAPI backend in the background,
REM waits briefly, then runs the Gradio frontend in the foreground.

echo 🚀 Starting Transportation AI Platform...

REM Start backend FastAPI server in background (same port start.sh uses)
echo 📡 Starting Backend API Server...
start /B uvicorn src.main:app --host 0.0.0.0 --port 3454

REM Wait for backend to start
timeout /t 5 /nobreak > nul

REM NOTE(review): unlike start.sh, there is no real health check here —
REM we only assume the server came up after the 5-second wait.
echo ✅ Backend API should be running on port 3454

REM Start frontend Gradio app (blocks until the app exits)
echo 🎨 Starting Frontend Gradio App...
python app.py
|
source/start.sh
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Start script for the Docker container: runs the backend API in the
# background and the Gradio frontend in the foreground.

echo "🚀 Starting Transportation AI Platform..."

# Start backend FastAPI server in background
echo "📡 Starting Backend API Server..."
uvicorn src.main:app --host 0.0.0.0 --port 3454 &
BACKEND_PID=$!

# Ensure the backend is cleaned up however this script exits.
# Bug fix: the trap used to be registered AFTER the blocking `python app.py`,
# so an interrupted/crashed frontend left the backend process orphaned.
trap 'kill "$BACKEND_PID" 2>/dev/null' EXIT

# Wait a bit for backend to start
sleep 5

# Check if backend is running
if curl -f http://localhost:3454/ > /dev/null 2>&1; then
    echo "✅ Backend API is running on port 3454"
else
    echo "❌ Backend failed to start"
    exit 1
fi

# Start frontend Gradio app (blocks; backend is killed by the trap on exit)
echo "🎨 Starting Frontend Gradio App..."
python app.py
|