github-actions[bot] commited on
Commit ·
39856b4
1
Parent(s): 0bcb01c
Auto-deploy from GitHub: 1fbc6e7e1a7e8157edee173a9390286563fe3106
Browse files- .gitattributes +0 -35
- DEPLOYMENT.md +220 -0
- Dockerfile +28 -11
- README.md +33 -10
- __init__.py +9 -0
- app.py +47 -0
- core.py +375 -0
- packages.txt +2 -0
- requirements.txt +5 -3
- src/streamlit_app.py +0 -40
.gitattributes
DELETED
|
@@ -1,35 +0,0 @@
|
|
| 1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
DEPLOYMENT.md
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Deploying NaviBlu Chatbot to Hugging Face Spaces
|
| 2 |
+
|
| 3 |
+
This guide explains how to deploy the NaviBlu chatbot to Hugging Face Spaces.
|
| 4 |
+
|
| 5 |
+
> ⚠️ **Note**: As of April 2025, Hugging Face deprecated the built-in Streamlit SDK. Streamlit apps now require Docker. This folder is already configured for Docker deployment.
|
| 6 |
+
|
| 7 |
+
## Prerequisites
|
| 8 |
+
|
| 9 |
+
1. **Hugging Face Account**: Sign up at https://huggingface.co/
|
| 10 |
+
2. **API Keys**: You'll need these as Secrets in your Space:
|
| 11 |
+
- `GROQ_API_KEY`
|
| 12 |
+
- `AMADEUS_API_KEY`
|
| 13 |
+
- `AMADEUS_API_SECRET`
|
| 14 |
+
|
| 15 |
+
## Files in This Folder
|
| 16 |
+
|
| 17 |
+
All files needed for Hugging Face Spaces deployment:
|
| 18 |
+
|
| 19 |
+
```
|
| 20 |
+
chatbot/
|
| 21 |
+
├── app.py # Streamlit entry point
|
| 22 |
+
├── core.py # Core chatbot logic
|
| 23 |
+
├── __init__.py # Module initialization
|
| 24 |
+
├── Dockerfile # Docker configuration (required)
|
| 25 |
+
├── README.md # HF Space metadata (YAML frontmatter)
|
| 26 |
+
├── requirements.txt # Python dependencies
|
| 27 |
+
├── packages.txt # System dependencies (apt-get)
|
| 28 |
+
├── DEPLOYMENT.md # This guide
|
| 29 |
+
└── .streamlit/
|
| 30 |
+
└── config.toml # Streamlit configuration
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
## Deployment Steps
|
| 34 |
+
|
| 35 |
+
### Step 1: Create a New Hugging Face Space
|
| 36 |
+
|
| 37 |
+
1. Go to https://huggingface.co/new-space
|
| 38 |
+
2. Configure:
|
| 39 |
+
- **Space name**: `naviblu-travel-assistant` (or your choice)
|
| 40 |
+
- **License**: MIT (or your preference)
|
| 41 |
+
- **SDK**: Select **Docker**
|
| 42 |
+
- **Hardware**: **CPU basic** (free tier is sufficient)
|
| 43 |
+
3. Click **Create Space**
|
| 44 |
+
|
| 45 |
+
### Step 2: Deploy the Chatbot Folder
|
| 46 |
+
|
| 47 |
+
**Option A: Upload via Web Interface (Easiest)**
|
| 48 |
+
|
| 49 |
+
1. In your new Space, click **Files** tab
|
| 50 |
+
2. Click **Add file** → **Upload files**
|
| 51 |
+
3. Upload ALL files from this `chatbot/` folder:
|
| 52 |
+
- `app.py`
|
| 53 |
+
- `core.py`
|
| 54 |
+
- `__init__.py`
|
| 55 |
+
- `Dockerfile`
|
| 56 |
+
- `README.md`
|
| 57 |
+
- `requirements.txt`
|
| 58 |
+
- `packages.txt`
|
| 59 |
+
- `.streamlit/config.toml` (create the folder structure)
|
| 60 |
+
4. Commit the files
|
| 61 |
+
|
| 62 |
+
**Option B: Git Push**
|
| 63 |
+
|
| 64 |
+
1. Clone your new Space:
|
| 65 |
+
```bash
|
| 66 |
+
git clone https://huggingface.co/spaces/YOUR-USERNAME/naviblu-travel-assistant
|
| 67 |
+
cd naviblu-travel-assistant
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
2. Copy files from the chatbot folder:
|
| 71 |
+
```bash
|
| 72 |
+
cp -r /path/to/NaviBlu_Travel_Assistant/chatbot/* .
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
3. Push to Hugging Face:
|
| 76 |
+
```bash
|
| 77 |
+
git add .
|
| 78 |
+
git commit -m "Deploy NaviBlu chatbot"
|
| 79 |
+
git push
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
### Step 3: Add API Keys as Secrets
|
| 83 |
+
|
| 84 |
+
1. In your Space, go to **Settings** → **Repository secrets**
|
| 85 |
+
2. Add the following secrets:
|
| 86 |
+
- **Name**: `GROQ_API_KEY`, **Value**: Your Groq API key
|
| 87 |
+
- **Name**: `AMADEUS_API_KEY`, **Value**: Your Amadeus API key
|
| 88 |
+
- **Name**: `AMADEUS_API_SECRET`, **Value**: Your Amadeus API secret
|
| 89 |
+
|
| 90 |
+
### Step 4: Wait for Build
|
| 91 |
+
|
| 92 |
+
The Space will build the Docker image and start (typically 3-7 minutes for first build). Monitor progress in the **App** tab or **Logs**.
|
| 93 |
+
|
| 94 |
+
### Step 5: Test Your Space
|
| 95 |
+
|
| 96 |
+
Your chatbot will be live at:
|
| 97 |
+
```
|
| 98 |
+
https://huggingface.co/spaces/YOUR-USERNAME/naviblu-travel-assistant
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
## Embedding in Website
|
| 102 |
+
|
| 103 |
+
To embed in your website (`index.html`), update the iframe URL:
|
| 104 |
+
|
| 105 |
+
```html
|
| 106 |
+
<iframe src="https://YOUR-USERNAME-naviblu-travel-assistant.hf.space"
|
| 107 |
+
class="streamlit-iframe"
|
| 108 |
+
title="NaviBlu Travel Assistant">
|
| 109 |
+
</iframe>
|
| 110 |
+
```
|
| 111 |
+
|
| 112 |
+
**Note**: The embed URL format for Docker Spaces is `https://USERNAME-SPACENAME.hf.space`
|
| 113 |
+
|
| 114 |
+
## Understanding the Docker Setup
|
| 115 |
+
|
| 116 |
+
### Dockerfile Explained
|
| 117 |
+
|
| 118 |
+
```dockerfile
|
| 119 |
+
FROM python:3.11-slim # Base Python image
|
| 120 |
+
WORKDIR /app # Set working directory
|
| 121 |
+
COPY requirements.txt . # Copy dependencies
|
| 122 |
+
RUN pip install ... # Install Python packages
|
| 123 |
+
COPY . . # Copy all app files
|
| 124 |
+
RUN useradd -m -u 1000 user # Create non-root user (HF requirement)
|
| 125 |
+
USER user # Switch to non-root user
|
| 126 |
+
EXPOSE 8501 # Expose Streamlit port
|
| 127 |
+
CMD ["streamlit", "run", ...] # Start command
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
### README.md YAML Frontmatter
|
| 131 |
+
|
| 132 |
+
```yaml
|
| 133 |
+
---
|
| 134 |
+
title: NaviBlu Travel Assistant
|
| 135 |
+
sdk: docker # Required for Streamlit now
|
| 136 |
+
app_port: 8501 # Streamlit's default port
|
| 137 |
+
---
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
## Troubleshooting
|
| 141 |
+
|
| 142 |
+
### Build Fails
|
| 143 |
+
|
| 144 |
+
- Check **Logs** in the App tab for error messages
|
| 145 |
+
- Verify Dockerfile syntax is correct
|
| 146 |
+
- Ensure all files are uploaded (especially `requirements.txt`)
|
| 147 |
+
|
| 148 |
+
### App Stuck on "Starting"
|
| 149 |
+
|
| 150 |
+
- Make sure `app_port: 8501` is in README.md frontmatter
|
| 151 |
+
- Verify Streamlit is configured to run on `0.0.0.0:8501`
|
| 152 |
+
- Check that Dockerfile exposes port 8501
|
| 153 |
+
|
| 154 |
+
### Import Errors
|
| 155 |
+
|
| 156 |
+
- Ensure `core.py` and `__init__.py` are uploaded
|
| 157 |
+
- Check that `app.py` imports use `from core import Chatbot` (not relative)
|
| 158 |
+
|
| 159 |
+
### API Errors
|
| 160 |
+
|
| 161 |
+
- Verify secrets are named exactly as expected (case-sensitive)
|
| 162 |
+
- Test API keys independently to ensure they're valid
|
| 163 |
+
|
| 164 |
+
### Permission Errors
|
| 165 |
+
|
| 166 |
+
- The Dockerfile creates a non-root user - this is required by HF
|
| 167 |
+
- Make sure you're not trying to write to system directories
|
| 168 |
+
|
| 169 |
+
## Updating the Space
|
| 170 |
+
|
| 171 |
+
### Option A: Automatic via GitHub Actions (Recommended)
|
| 172 |
+
|
| 173 |
+
If you set up the GitHub Actions workflow, simply push changes to the `chatbot/` folder on GitHub - it will automatically deploy to Hugging Face!
|
| 174 |
+
|
| 175 |
+
**One-time setup:**
|
| 176 |
+
1. Go to your GitHub repo → **Settings** → **Secrets and variables** → **Actions**
|
| 177 |
+
2. Add these secrets:
|
| 178 |
+
- `HF_TOKEN`: Your Hugging Face access token (from https://huggingface.co/settings/tokens)
|
| 179 |
+
- `HF_SPACE_NAME`: Your Space path (e.g., `your-username/naviblu-travel-assistant`)
|
| 180 |
+
|
| 181 |
+
Then every push to `chatbot/` will auto-deploy!
|
| 182 |
+
|
| 183 |
+
### Option B: Manual Push
|
| 184 |
+
|
| 185 |
+
```bash
|
| 186 |
+
# After making changes locally
|
| 187 |
+
cd your-space-clone
|
| 188 |
+
cp -r /path/to/chatbot/* .
|
| 189 |
+
git add .
|
| 190 |
+
git commit -m "Update chatbot"
|
| 191 |
+
git push
|
| 192 |
+
```
|
| 193 |
+
|
| 194 |
+
### Option C: Web Upload
|
| 195 |
+
|
| 196 |
+
Upload updated files through the Hugging Face web interface.
|
| 197 |
+
|
| 198 |
+
## Cost
|
| 199 |
+
|
| 200 |
+
- **CPU basic**: Free (sufficient for this chatbot)
|
| 201 |
+
- **CPU upgrade**: ~$0.50/hour
|
| 202 |
+
- **GPU**: Starting at ~$0.60/hour (not needed)
|
| 203 |
+
|
| 204 |
+
## Why Hugging Face Spaces?
|
| 205 |
+
|
| 206 |
+
✅ **Free tier** with good resources
|
| 207 |
+
✅ **Automatic HTTPS** and domain
|
| 208 |
+
✅ **Easy secrets management** for API keys
|
| 209 |
+
✅ **Git-based deployment**
|
| 210 |
+
✅ **Docker support** for full control
|
| 211 |
+
✅ **Good uptime** and reliability
|
| 212 |
+
✅ **Embed-friendly** for websites
|
| 213 |
+
|
| 214 |
+
## Next Steps
|
| 215 |
+
|
| 216 |
+
1. ✅ Deploy chatbot to Hugging Face Spaces
|
| 217 |
+
2. ✅ Test the Space URL directly
|
| 218 |
+
3. ✅ Update `index.html` with new embed URL
|
| 219 |
+
4. ✅ Deploy website to GitHub Pages
|
| 220 |
+
5. ✅ Test the fully integrated experience
|
Dockerfile
CHANGED
|
@@ -1,20 +1,37 @@
|
|
| 1 |
-
|
|
|
|
| 2 |
|
|
|
|
| 3 |
WORKDIR /app
|
| 4 |
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
|
| 11 |
-
|
| 12 |
-
COPY
|
|
|
|
| 13 |
|
| 14 |
-
|
|
|
|
| 15 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
EXPOSE 8501
|
| 17 |
|
| 18 |
-
|
|
|
|
| 19 |
|
| 20 |
-
|
|
|
|
|
|
| 1 |
+
# Hugging Face Spaces Dockerfile for Streamlit
|
| 2 |
+
FROM python:3.11-slim
|
| 3 |
|
| 4 |
+
# Set working directory
|
| 5 |
WORKDIR /app
|
| 6 |
|
| 7 |
+
# Install system dependencies
|
| 8 |
+
COPY packages.txt .
|
| 9 |
+
RUN apt-get update && \
|
| 10 |
+
xargs -a packages.txt apt-get install -y && \
|
| 11 |
+
rm -rf /var/lib/apt/lists/*
|
| 12 |
|
| 13 |
+
# Install Python dependencies
|
| 14 |
+
COPY requirements.txt .
|
| 15 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 16 |
|
| 17 |
+
# Copy application files
|
| 18 |
+
COPY . .
|
| 19 |
|
| 20 |
+
# Create a non-root user for security (required by HF Spaces)
|
| 21 |
+
RUN useradd -m -u 1000 user
|
| 22 |
+
USER user
|
| 23 |
+
|
| 24 |
+
# Set environment variables
|
| 25 |
+
ENV HOME=/home/user \
|
| 26 |
+
PATH=/home/user/.local/bin:$PATH \
|
| 27 |
+
STREAMLIT_SERVER_PORT=8501 \
|
| 28 |
+
STREAMLIT_SERVER_ADDRESS=0.0.0.0
|
| 29 |
+
|
| 30 |
+
# Expose the Streamlit port
|
| 31 |
EXPOSE 8501
|
| 32 |
|
| 33 |
+
# Health check
|
| 34 |
+
HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health || exit 1
|
| 35 |
|
| 36 |
+
# Run Streamlit
|
| 37 |
+
CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
|
README.md
CHANGED
|
@@ -1,19 +1,42 @@
|
|
| 1 |
---
|
| 2 |
title: NaviBlu Travel Assistant
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
| 7 |
app_port: 8501
|
| 8 |
-
tags:
|
| 9 |
-
- streamlit
|
| 10 |
pinned: false
|
| 11 |
-
|
| 12 |
---
|
| 13 |
|
| 14 |
-
#
|
| 15 |
|
| 16 |
-
|
| 17 |
|
| 18 |
-
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
title: NaviBlu Travel Assistant
|
| 3 |
+
emoji: ✈️
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: indigo
|
| 6 |
sdk: docker
|
| 7 |
app_port: 8501
|
|
|
|
|
|
|
| 8 |
pinned: false
|
| 9 |
+
license: mit
|
| 10 |
---
|
| 11 |
|
| 12 |
+
# NaviBlu Travel Assistant
|
| 13 |
|
| 14 |
+
AI-powered travel chatbot that helps you find flights, hotels, and discover destinations.
|
| 15 |
|
| 16 |
+
Built with Streamlit and powered by Meta's Llama 3.3 70B model via Groq.
|
| 17 |
+
|
| 18 |
+
## Features
|
| 19 |
+
|
| 20 |
+
- ✈️ **Flight Search** - Find flights from 400+ airlines
|
| 21 |
+
- 🏨 **Hotel Search** - Browse 150,000+ hotels worldwide
|
| 22 |
+
- 📍 **Location Info** - Discover attractions and activities
|
| 23 |
+
- 💬 **Natural Conversation** - Just chat naturally, no forms needed
|
| 24 |
+
|
| 25 |
+
## Usage
|
| 26 |
+
|
| 27 |
+
Simply type your travel questions in the chat, for example:
|
| 28 |
+
- "Find me flights from New York to London next weekend"
|
| 29 |
+
- "What hotels are available in Paris this Friday?"
|
| 30 |
+
- "What are popular things to do in Tokyo?"
|
| 31 |
+
|
| 32 |
+
## API Keys Required
|
| 33 |
+
|
| 34 |
+
This Space requires the following secrets to be configured:
|
| 35 |
+
- `GROQ_API_KEY` - For LLM inference
|
| 36 |
+
- `AMADEUS_API_KEY` - For hotel data
|
| 37 |
+
- `AMADEUS_API_SECRET` - For hotel data
|
| 38 |
+
|
| 39 |
+
## Credits
|
| 40 |
+
|
| 41 |
+
**ITCS 6112 - Group 5**
|
| 42 |
+
Cameron Detig • Juan Rojas • Evelyn Hosana • Varsha Chintalapati
|
__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""NaviBlu Travel Assistant - Chatbot Module.

Re-exports the Chatbot class so callers can do ``from chatbot import Chatbot``
instead of reaching into the ``core`` submodule directly.
"""

from .core import Chatbot

__all__ = ['Chatbot']
|
app.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face Space entry point for the NaviBlu Travel Assistant.
#
# Renders a Streamlit chat UI and delegates each user prompt to the shared
# Chatbot backend defined in core.py.

import streamlit as st
import os
from core import Chatbot
from dotenv import load_dotenv

# Load API keys from a local .env when present (on HF Spaces the secrets are
# injected as environment variables, so this is a harmless no-op there).
load_dotenv()


# Streamlit re-runs this whole script on every interaction, so both the
# message log and the Chatbot instance must live in st.session_state to
# survive reruns.
if st.session_state.get("messages") is None:
    st.session_state.messages = []

if st.session_state.get("chatbot") is None:
    st.session_state.chatbot = Chatbot()


# Streamlit App ----------------------------------------------------------------------

st.title("NaviBlu Travel Assistant")

# Replay the stored conversation so the full history is visible after a rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

# Accept user input.
if prompt := st.chat_input("Type Here"):
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Echo the user's query in the UI.
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        # Delegate to the shared chatbot logic and render its reply.
        response = st.session_state.chatbot.process_input(prompt)  # type: ignore
        st.markdown(response)

        # Persist the reply in the chat history.
        st.session_state.messages.append({"role": "assistant", "content": response})
|
core.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core backend chatbot logic with agent-based architecture
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import json
|
| 5 |
+
from datetime import date
|
| 6 |
+
from amadeus import Client, ResponseError # Hotels API
|
| 7 |
+
from fast_flights import FlightData, Passengers, Result, get_flights # Flights API
|
| 8 |
+
from groq import Groq
|
| 9 |
+
from dotenv import load_dotenv
|
| 10 |
+
load_dotenv()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Chatbot --------------------------------------------------------------------------------
|
| 14 |
+
|
| 15 |
+
class Chatbot():
|
| 16 |
+
def __init__(self):
|
| 17 |
+
self.input_prompt = ""
|
| 18 |
+
self.todays_date = date.today().isoformat()
|
| 19 |
+
|
| 20 |
+
self.LLM_model = "llama-3.3-70b-versatile"
|
| 21 |
+
|
| 22 |
+
self.amadeus = Client(
|
| 23 |
+
client_id = os.getenv("AMADEUS_API_KEY"),
|
| 24 |
+
client_secret = os.getenv("AMADEUS_API_SECRET")
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
# chat_history to store all of the messages in the conversation.
|
| 28 |
+
# Set the system prompt to establish the behavior of the chatbot.
|
| 29 |
+
self.chat_history = [
|
| 30 |
+
{"role": "system",
|
| 31 |
+
"content": "You are a travel assistant named NaviBlu who helps provide information to users for planning trips and vacations."}
|
| 32 |
+
]
|
| 33 |
+
# seperate history to store just the user's messages
|
| 34 |
+
self.user_message_history = []
|
| 35 |
+
|
| 36 |
+
self.flight_info = ""
|
| 37 |
+
self.hotel_info = ""
|
| 38 |
+
self.location_info = ""
|
| 39 |
+
self.general_info = ""
|
| 40 |
+
|
| 41 |
+
# establish Groq client for API calls
|
| 42 |
+
self.client = Groq(api_key=os.getenv("GROQ_API_KEY"))
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def process_input(self, input_prompt:str):
|
| 48 |
+
'''Determine if the user prompt is asking for information on flights, hotels, location, or general info.'''
|
| 49 |
+
|
| 50 |
+
print("Running process_input function")
|
| 51 |
+
|
| 52 |
+
self.input_prompt = input_prompt
|
| 53 |
+
print(f"Input prompt: {self.input_prompt}")
|
| 54 |
+
|
| 55 |
+
# Reset info
|
| 56 |
+
self.flight_info = ""
|
| 57 |
+
self.hotel_info = ""
|
| 58 |
+
self.location_info = ""
|
| 59 |
+
self.general_info = ""
|
| 60 |
+
|
| 61 |
+
# Add to chat history
|
| 62 |
+
self.chat_history.append({"role": "user", "content": str(self.input_prompt)})
|
| 63 |
+
self.user_message_history.append({"role": "user", "content": str(self.input_prompt)})
|
| 64 |
+
|
| 65 |
+
# make seperate chat history just for this process
|
| 66 |
+
process_input_history = [{
|
| 67 |
+
"role": "system",
|
| 68 |
+
"content": """You are an AI assistant tasked with determining if the user's most recent prompt is looking for the categories of 'flight', 'hotel', 'location', or 'general' information.
|
| 69 |
+
If a category has already been used in an earlier assistant message and the most recent user message does not require it to be run again, do not run it again!! Just return the category they are currently looking for.
|
| 70 |
+
Return only the names of the categories the prompt is asking about, 'flight', 'hotel', 'location', or 'general'. Only include 'location' if the user is asking for information about attractions and activities at a location.
|
| 71 |
+
|
| 72 |
+
Here are some examples to follow:
|
| 73 |
+
user: can you find flights to New York?
|
| 74 |
+
assistant: flight
|
| 75 |
+
|
| 76 |
+
user: can you find hotels in vancouver this weekend?
|
| 77 |
+
assistant: hotel
|
| 78 |
+
|
| 79 |
+
user: what are hotels near popular tourist locations in Orlando?
|
| 80 |
+
assistant: hotel, location
|
| 81 |
+
|
| 82 |
+
user: help me plan an entire trip from Charlotte to London this weekend.
|
| 83 |
+
assistant: flight, hotel, location
|
| 84 |
+
|
| 85 |
+
user: what are some popular activities to do in Wellington, New Zealand?
|
| 86 |
+
assistant: location
|
| 87 |
+
|
| 88 |
+
user: How far is Tokyo from New York?
|
| 89 |
+
assistant: general
|
| 90 |
+
"""
|
| 91 |
+
}]
|
| 92 |
+
|
| 93 |
+
# Add all of the user's messages to process_input_history
|
| 94 |
+
for message in self.chat_history:
|
| 95 |
+
process_input_history.append(message)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
completion = self.client.chat.completions.create(
|
| 99 |
+
model = self.LLM_model,
|
| 100 |
+
messages = process_input_history, # type: ignore
|
| 101 |
+
temperature = 0.1
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
categories = str(completion.choices[0].message.content)
|
| 105 |
+
print(f"---User Query Categories: {categories}---")
|
| 106 |
+
self.user_message_history.append({"role": "assistant", "content": str(categories)}) # Add what agents were called for each query
|
| 107 |
+
|
| 108 |
+
if ("flight" in categories):
|
| 109 |
+
self.flight_info = self.flight_agent()
|
| 110 |
+
|
| 111 |
+
if ("hotel" in categories):
|
| 112 |
+
self.hotel_info = self.hotel_agent()
|
| 113 |
+
|
| 114 |
+
if ("location" in categories):
|
| 115 |
+
self.location_info = self.location_agent()
|
| 116 |
+
|
| 117 |
+
if ("general" in categories or len(categories) == 0):
|
| 118 |
+
self.general_info = self.general_info_agent()
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
# Combine the responses of all the agents
|
| 122 |
+
assistant_response = f"{self.flight_info}\n + {self.hotel_info}\n + {self.location_info}\n + {self.general_info}"
|
| 123 |
+
|
| 124 |
+
# Add AI response to chat history
|
| 125 |
+
self.chat_history.append({"role": "assistant", "content": assistant_response})
|
| 126 |
+
|
| 127 |
+
print("Outputing Message.\n")
|
| 128 |
+
return assistant_response
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def flight_agent(self):
|
| 135 |
+
"""Uses an LLM to parse flight parameters the user is searching for. Then uses an API to search for available flights."""
|
| 136 |
+
print("Running flight agent.")
|
| 137 |
+
|
| 138 |
+
flight_info_prompt = f"""Use this user message history to extract the flight information the user is currently looking for:
|
| 139 |
+
user message history: {self.user_message_history}
|
| 140 |
+
format your response as a json exactly structured like below and nothing else. If you can't find information for a section, make an educated guess based on the message history.
|
| 141 |
+
If needed, today's date is {self.todays_date}.
|
| 142 |
+
|
| 143 |
+
{{
|
| 144 |
+
"tripType": either "round-trip" or "one-way",
|
| 145 |
+
"originCity": three letter origin city iataCode,
|
| 146 |
+
"destinationCity": three letter destination city iataCode,
|
| 147 |
+
"originAirport": three letter origin airport code based on the city,
|
| 148 |
+
"destinationAirport": three letter destination airport code based on the city,
|
| 149 |
+
"departureDate": "YYY-MM-DD",
|
| 150 |
+
"arrivalDate": "YYY-MM-DD" if tripType is one-way, enter "None" for this field,
|
| 151 |
+
"numAdults": X If no number is specified, assume 2,
|
| 152 |
+
"numChildren": X If no number is specified, assume 0,
|
| 153 |
+
"seat": either "economy", "premium-economy", "business", or "first". If no specification is made, enter "economy"
|
| 154 |
+
}}
|
| 155 |
+
"""
|
| 156 |
+
|
| 157 |
+
# Use LLM to parse flight search parameters from prompt
|
| 158 |
+
search_info = self.call_llm(prompt = flight_info_prompt)
|
| 159 |
+
# convert to JSON
|
| 160 |
+
search_info_json = json.loads(search_info) # type: ignore
|
| 161 |
+
|
| 162 |
+
output = ["Flight Search Parameters:"]
|
| 163 |
+
output.append("--------------------------------------------------------")
|
| 164 |
+
output.append(f"Trip Type: {search_info_json.get("tripType")}, Seat Type: {search_info_json.get("seat")}")
|
| 165 |
+
output.append(f"Origin City: {search_info_json.get("originCity")}, Destination City: {search_info_json.get("destinationCity")}")
|
| 166 |
+
output.append(f"Origin Airport: {search_info_json.get("originAirport")}, Destination Airport: {search_info_json.get("destinationAirport")}")
|
| 167 |
+
if search_info_json.get("tripType") == "one-way":
|
| 168 |
+
output.append(f"Departure: {search_info_json.get("departureDate")}")
|
| 169 |
+
else:
|
| 170 |
+
output.append(f"Departure: {search_info_json.get("departureDate")}, Arrival: {search_info_json.get("arrivalDate")}")
|
| 171 |
+
output.append(f"Adults: {search_info_json.get("numAdults")}, Children: {search_info_json.get("numChildren")}\n")
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
# get info for outbound flight
|
| 176 |
+
outbound: Result = get_flights(
|
| 177 |
+
flight_data=[
|
| 178 |
+
FlightData(date=search_info_json.get("departureDate"), from_airport=search_info_json.get("originAirport"), to_airport=search_info_json.get("destinationAirport"))
|
| 179 |
+
],
|
| 180 |
+
trip="one-way",
|
| 181 |
+
seat=search_info_json.get("seat"),
|
| 182 |
+
passengers=Passengers(adults=search_info_json.get("numAdults"), children=search_info_json.get("numChildren"), infants_in_seat=0, infants_on_lap=0),
|
| 183 |
+
fetch_mode="fallback",
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
output.append("Outbound Flights:-----------------------------------")
|
| 187 |
+
output.append(f"Current Prices: {outbound.current_price}\n")
|
| 188 |
+
|
| 189 |
+
for flight in outbound.flights:
|
| 190 |
+
if flight.is_best == True:
|
| 191 |
+
#print(f"Is best?: {flight.is_best}")
|
| 192 |
+
output.append(f"Airline Name: {flight.name}")
|
| 193 |
+
output.append(f"Departure: {flight.departure}")
|
| 194 |
+
output.append(f"Arrival: {flight.arrival}")
|
| 195 |
+
output.append(f"Duration: {flight.duration}")
|
| 196 |
+
output.append(f"Stops: {flight.stops}")
|
| 197 |
+
output.append(f"Price: {flight.price}")
|
| 198 |
+
output.append("\n")
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# If round-trip, get info for inbound flight as well. (fast-flights API doesn't support normal round-trip)
|
| 202 |
+
if search_info_json.get("tripType") == "round-trip":
|
| 203 |
+
inbound: Result = get_flights(
|
| 204 |
+
flight_data=[
|
| 205 |
+
FlightData(date=search_info_json.get("arrivalDate"), from_airport=search_info_json.get("destinationAirport"), to_airport=search_info_json.get("originAirport"))
|
| 206 |
+
],
|
| 207 |
+
trip="one-way",
|
| 208 |
+
seat=search_info_json.get("seat"),
|
| 209 |
+
passengers=Passengers(adults=search_info_json.get("numAdults"), children=search_info_json.get("numChildren"), infants_in_seat=0, infants_on_lap=0),
|
| 210 |
+
fetch_mode="fallback",
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
output.append("Inbound Flights:-----------------------------------")
|
| 214 |
+
output.append(f"Current Prices: {inbound.current_price}\n")
|
| 215 |
+
|
| 216 |
+
for flight in inbound.flights:
|
| 217 |
+
if flight.is_best == True:
|
| 218 |
+
#print(f"Is best?: {flight.is_best}")
|
| 219 |
+
output.append(f"Airline Name: {flight.name}")
|
| 220 |
+
output.append(f"Departure: {flight.departure}")
|
| 221 |
+
output.append(f"Arrival: {flight.arrival}")
|
| 222 |
+
output.append(f"Duration: {flight.duration}")
|
| 223 |
+
output.append(f"Stops: {flight.stops}")
|
| 224 |
+
output.append(f"Price: {flight.price}")
|
| 225 |
+
output.append("\n")
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
output_string = ""
|
| 229 |
+
for line in output:
|
| 230 |
+
output_string += str(line) + " \n"
|
| 231 |
+
|
| 232 |
+
print("\n\nFlight Output String:\n")
|
| 233 |
+
print(output_string)
|
| 234 |
+
|
| 235 |
+
return str(output_string)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def hotel_agent(self):
    """Search hotel offers matching the user's request and return a printable summary.

    Extracts city / dates / guest count from the chat history via the LLM,
    queries the Amadeus hotel APIs, and formats up to five offers into a
    single display string. Returns a plain error string when no matching
    hotels can be found.
    """
    print("Running hotel agent.\n")

    # Ask the LLM to distill the search parameters into strict JSON.
    # Date placeholders fixed to "YYYY-MM-DD" (original said "YYY-MM-DD",
    # which invited malformed three-digit-year dates from the model).
    prompt = f"""Use this user message history to extract the hotel information the user is currently looking for:
user message history: {self.user_message_history}
format your response as a json exactly structured like below and nothing else. If you can't find information for a section, make an educated guess based on the message history.
If needed, today's date is {self.todays_date}.

{{
"city": three letter city iataCode,
"checkInDate": "YYYY-MM-DD",
"checkOutDate": "YYYY-MM-DD",
"numGuests": X If no number is specified, assume it is 2
}}
"""

    search_info = self.call_llm(prompt=prompt)
    print(search_info)
    print()

    # Parse the LLM reply; it is expected to be bare JSON per the prompt.
    search_info_json = json.loads(search_info)  # type: ignore

    # Get the list of hotels for the requested city code.
    hotel_response = self.amadeus.reference_data.locations.hotels.by_city.get(
        cityCode=search_info_json.get("city")
    )

    # Collect hotel ids for the offer search.
    hotel_ids = [str(hotel.get("hotelId")) for hotel in hotel_response.data]

    no_match_msg = "Unable to find hotels that match the search criteria."

    try:
        # Search offers for the first 30 hotel ids only (keeps the request
        # within the API's accepted size).
        hotel_offers = self.amadeus.shopping.hotel_offers_search.get(
            hotelIds=hotel_ids[0:30],
            checkInDate=search_info_json.get("checkInDate"),
            checkOutDate=search_info_json.get("checkOutDate"),
            adults=search_info_json.get("numGuests"),
        )
        num_hotels = len(hotel_offers.data)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; any API/lookup failure means "no results".
        return no_match_msg

    if num_hotels == 0:
        return no_match_msg
    # Show at most five offers (replaces the >=5 / <5-and->0 branch ladder).
    hotels_to_display = hotel_offers.data[: min(num_hotels, 5)]

    # All header fields come from the first hotel's first offer.
    first_offer = hotels_to_display[0].get("offers")[0]  # type: ignore

    output = ["\nHotel Search Parameters:"]
    output.append("--------------------------------------------------------")
    output.append(f"Check In Date: {first_offer.get('checkInDate')}")
    output.append(f"Check Out Date: {first_offer.get('checkOutDate')}")
    output.append(f"City: {search_info_json.get('city')}")

    # Summarize guest counts, e.g. "Guests: adults: 2 ".
    guests = first_offer.get("guests")
    guests_info = "Guests: "
    for key in guests:
        guests_info += f"{key}: {guests.get(key)} "
    output.append(guests_info)

    for hotel in hotels_to_display:
        try:
            output.append("")
            output.append("")
            for key in hotel:
                if key == "hotel":
                    output.append(f"Hotel Name : {hotel.get(key).get('name')}")  # type: ignore
                elif key == "available":
                    output.append(f"Available : {hotel.get(key)}")  # type: ignore
                elif key == "offers":
                    for offer in hotel.get(key):  # type: ignore
                        room_type = offer.get("room").get("typeEstimated")
                        price = offer.get("price")
                        output.append(f"Room Type: {room_type.get('category')}")
                        output.append(f"Beds: {room_type.get('beds')} {room_type.get('bedType')}")
                        output.append(f"Price: {price.get('currency')} base: ${price.get('base')} total: ${price.get('total')}")
                        output.append(f"Average Price per night ${price.get('variations').get('average').get('base')}")
                        output.append(f"Description: {offer.get('room').get('description').get('text')}")
        except Exception:
            # Best-effort display: skip hotels with missing/unexpected fields.
            print("Invalid Hotel Info")

    output.append("")
    # Linear-time join replaces the original quadratic `+=` loop;
    # the trailing " \n" per line is preserved for the UI.
    output_string = "".join(str(line) + " \n" for line in output)

    print("\n\nHotel Output String:")
    print(output_string)

    return output_string
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def location_agent(self):
    """Answer location questions: attractions and activities for the requested place."""
    print("Running location agent.")

    # Wrap the raw user prompt in instructions tailored to location info.
    query = (
        "Use this user prompt to provide relevant information on the requested "
        f"location including popular tourist attractions and activities: {self.input_prompt}"
    )
    llm_reply = self.call_llm(prompt=query)

    print(llm_reply)
    print()
    return llm_reply
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def general_info_agent(self):
    """Fallback agent: answer general questions that fit no specialized agent."""
    print("Running general info agent.")
    # Single LLM round-trip; the reply is returned to the caller as-is.
    return self.call_llm(
        prompt=f"Use this user prompt to provide relevant general information: {self.input_prompt}"
    )
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def call_llm(self, prompt):
    """Send a single-turn prompt to the configured LLM and return its text reply."""
    print("--Running call_llm.")

    # A one-message conversation: the caller's prompt as the sole user turn.
    messages = [{"role": "user", "content": prompt}]

    # Dispatch to the chat-completions endpoint with the configured model.
    response = self.client.chat.completions.create(
        model=self.LLM_model,
        messages=messages,  # type: ignore
    )

    return response.choices[0].message.content
|
packages.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# System packages required for the app (installed via apt-get)
|
| 2 |
+
curl
|
requirements.txt
CHANGED
|
@@ -1,3 +1,5 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
|
|
|
|
|
|
|
|
| 1 |
+
amadeus
|
| 2 |
+
fast_flights
|
| 3 |
+
groq
|
| 4 |
+
python-dotenv
|
| 5 |
+
streamlit
|
src/streamlit_app.py
DELETED
|
@@ -1,40 +0,0 @@
|
|
| 1 |
-
import altair as alt
|
| 2 |
-
import numpy as np
|
| 3 |
-
import pandas as pd
|
| 4 |
-
import streamlit as st
|
| 5 |
-
|
| 6 |
-
"""
|
| 7 |
-
# Welcome to Streamlit!
|
| 8 |
-
|
| 9 |
-
Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
|
| 10 |
-
If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
|
| 11 |
-
forums](https://discuss.streamlit.io).
|
| 12 |
-
|
| 13 |
-
In the meantime, below is an example of what you can do with just a few lines of code:
|
| 14 |
-
"""
|
| 15 |
-
|
| 16 |
-
num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
|
| 17 |
-
num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
|
| 18 |
-
|
| 19 |
-
indices = np.linspace(0, 1, num_points)
|
| 20 |
-
theta = 2 * np.pi * num_turns * indices
|
| 21 |
-
radius = indices
|
| 22 |
-
|
| 23 |
-
x = radius * np.cos(theta)
|
| 24 |
-
y = radius * np.sin(theta)
|
| 25 |
-
|
| 26 |
-
df = pd.DataFrame({
|
| 27 |
-
"x": x,
|
| 28 |
-
"y": y,
|
| 29 |
-
"idx": indices,
|
| 30 |
-
"rand": np.random.randn(num_points),
|
| 31 |
-
})
|
| 32 |
-
|
| 33 |
-
st.altair_chart(alt.Chart(df, height=700, width=700)
|
| 34 |
-
.mark_point(filled=True)
|
| 35 |
-
.encode(
|
| 36 |
-
x=alt.X("x", axis=None),
|
| 37 |
-
y=alt.Y("y", axis=None),
|
| 38 |
-
color=alt.Color("idx", legend=None, scale=alt.Scale()),
|
| 39 |
-
size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
|
| 40 |
-
))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|