Spaces:
Sleeping
Sleeping
Commit ·
100c46f
1
Parent(s): ef8ed7b
initial commit
Browse files- .huggingface.yml +31 -0
- agents/__init__.py +25 -0
- agents/__pycache__/__init__.cpython-312.pyc +0 -0
- agents/__pycache__/agent_system.cpython-312.pyc +0 -0
- agents/agent_factory.py +93 -0
- agents/base_agent.py +64 -0
- agents/coordinator.py +176 -0
- agents/hiking_agent.py +85 -0
- agents/place_agent.py +79 -0
- agents/restaurant_agent.py +40 -0
- agents/sentiment_agent.py +103 -0
- agents/web_agent.py +298 -0
- app.py +102 -0
- requirements.txt +11 -0
- services/__init__.py +1 -0
- services/__pycache__/__init__.cpython-312.pyc +0 -0
- services/__pycache__/hiking_service.cpython-312.pyc +0 -0
- services/__pycache__/place_service.cpython-312.pyc +0 -0
- services/__pycache__/restaurant_service.cpython-312.pyc +0 -0
- services/hiking_service.py +880 -0
- services/place_service.py +200 -0
- services/restaurant_service.py +219 -0
- utils/__init__.py +1 -0
- utils/__pycache__/__init__.cpython-312.pyc +0 -0
- utils/__pycache__/api_config.cpython-312.pyc +0 -0
- utils/__pycache__/formatting.cpython-312.pyc +0 -0
- utils/api_config.py +115 -0
- utils/formatting.py +138 -0
.huggingface.yml
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
sdk: gradio
|
| 2 |
+
sdk_version: 5.32.0
|
| 3 |
+
app_file: app.py
|
| 4 |
+
app_port: 7860
|
| 5 |
+
python_version: 3.11
|
| 6 |
+
command: "python app.py"
|
| 7 |
+
title: "MCP Travel Assistant - Client Interface"
|
| 8 |
+
short_description: "Intelligent MCP client with multi-agent system for travel planning, sentiment analysis, restaurant search, hiking recommendations, and web search"
|
| 9 |
+
colorFrom: blue
|
| 10 |
+
colorTo: purple
|
| 11 |
+
emoji: π
|
| 12 |
+
pinned: false
|
| 13 |
+
license: mit
|
| 14 |
+
startup_duration_timeout: 300s
|
| 15 |
+
hardware: cpu-basic
|
| 16 |
+
models:
|
| 17 |
+
- huggingface-hub
|
| 18 |
+
tags:
|
| 19 |
+
- gradio
|
| 20 |
+
- mcp
|
| 21 |
+
- travel
|
| 22 |
+
- sentiment-analysis
|
| 23 |
+
- places
|
| 24 |
+
- restaurants
|
| 25 |
+
- hiking
|
| 26 |
+
- web-search
|
| 27 |
+
- multi-agent
|
| 28 |
+
secrets:
|
| 29 |
+
- FOURSQUARE_API_KEY
|
| 30 |
+
- OPENAI_API_KEY
|
| 31 |
+
- API_KEY
|
agents/__init__.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Multi-Agent System for travel, sentiment analysis, and web search.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Export all the agent classes
|
| 6 |
+
from .base_agent import BaseAgent
|
| 7 |
+
from .sentiment_agent import SentimentAgent
|
| 8 |
+
from .place_agent import PlaceAgent
|
| 9 |
+
from .restaurant_agent import RestaurantAgent
|
| 10 |
+
from .hiking_agent import HikingAgent
|
| 11 |
+
from .web_agent import WebAgent
|
| 12 |
+
from .coordinator import CoordinatorAgent
|
| 13 |
+
from .agent_factory import AgentFactory, create_agent_system
|
| 14 |
+
|
| 15 |
+
__all__ = [
|
| 16 |
+
'BaseAgent',
|
| 17 |
+
'SentimentAgent',
|
| 18 |
+
'PlaceAgent',
|
| 19 |
+
'RestaurantAgent',
|
| 20 |
+
'HikingAgent',
|
| 21 |
+
'WebAgent',
|
| 22 |
+
'CoordinatorAgent',
|
| 23 |
+
'AgentFactory',
|
| 24 |
+
'create_agent_system'
|
| 25 |
+
]
|
agents/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (187 Bytes). View file
|
|
|
agents/__pycache__/agent_system.cpython-312.pyc
ADDED
|
Binary file (14 kB). View file
|
|
|
agents/agent_factory.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent factory for creating and managing specialized agents.
|
| 3 |
+
"""
|
| 4 |
+
from typing import List
|
| 5 |
+
from smolagents import LiteLLMModel
|
| 6 |
+
|
| 7 |
+
# Import all specialized agents
|
| 8 |
+
from .sentiment_agent import SentimentAgent
|
| 9 |
+
from .place_agent import PlaceAgent
|
| 10 |
+
from .restaurant_agent import RestaurantAgent
|
| 11 |
+
from .hiking_agent import HikingAgent
|
| 12 |
+
from .web_agent import WebAgent
|
| 13 |
+
from .coordinator import CoordinatorAgent
|
| 14 |
+
from .base_agent import BaseAgent
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class AgentFactory:
    """Factory that builds the full roster of specialized agents.

    On construction it instantiates every specialized agent with the given
    model and wires them into a CoordinatorAgent that routes queries.
    """

    def __init__(self, model: LiteLLMModel):
        # Keep a handle on the model, build the roster, then the router.
        self.model = model
        self.agents = self._create_agents()
        self.coordinator = CoordinatorAgent(self.agents, model)

    def _create_agents(self) -> List[BaseAgent]:
        """Instantiate one agent per specialty, all sharing the same model."""
        roster = [
            (SentimentAgent, "sentiment_agent"),
            (PlaceAgent, "place_agent"),
            (RestaurantAgent, "restaurant_agent"),
            (HikingAgent, "hiking_agent"),
            (WebAgent, "web_agent"),
        ]
        # Every agent starts with an empty tool list; tools are wired later.
        return [agent_cls(agent_name, [], self.model) for agent_cls, agent_name in roster]

    def get_coordinator(self) -> CoordinatorAgent:
        """Return the coordinator that fronts this agent system."""
        return self.coordinator

    def get_agent_by_name(self, name: str) -> BaseAgent:
        """Look up an agent by its unique name.

        Raises:
            ValueError: if no registered agent carries *name*.
        """
        found = next((candidate for candidate in self.agents if candidate.name == name), None)
        if found is None:
            raise ValueError(f"Agent '{name}' not found")
        return found

    def list_agents(self) -> List[str]:
        """Return the names of every registered agent."""
        return [candidate.name for candidate in self.agents]

    def process_query(self, query: str) -> str:
        """Delegate a user query to the coordinator and return its answer."""
        return self.coordinator.process_query(query)
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def create_agent_system(model: LiteLLMModel = None) -> AgentFactory:
    """Create a complete agent system with all specialized agents.

    A default LiteLLMModel is constructed when no model is supplied.
    """
    return AgentFactory(model if model is not None else LiteLLMModel())
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# Example usage and testing
if __name__ == "__main__":
    # Build the model, the agent system around it, and grab the router.
    model = LiteLLMModel()
    agent_system = create_agent_system(model)
    coordinator = agent_system.get_coordinator()

    print("π€ **Multi-Agent System Demo** π€\n")

    # A spread of query types: multi-intent, web lookups, sentiment, places.
    test_queries = [
        "Find hiking trails and restaurants near Seattle",
        "What's the weather like today?",
        "What is machine learning?",
        "How do I learn Python programming?",
        "Analyze the sentiment of: This product is absolutely amazing!",
        "Find hotels in New York",
    ]

    for i, query in enumerate(test_queries, 1):
        print(f"**Query #{i}:** {query}")
        print(coordinator.process_query(query))
        print("\n" + "=" * 60 + "\n")

    print("π **Demo Complete!** π")
agents/base_agent.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Base agent interface for all specialized agents.
|
| 3 |
+
"""
|
| 4 |
+
from typing import List, Dict, Any, Optional
|
| 5 |
+
from abc import ABC, abstractmethod
|
| 6 |
+
from smolagents import LiteLLMModel, CodeAgent, ToolCollection
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class BaseAgent(ABC):
    """Abstract base class shared by every specialized agent.

    Subclasses declare a keyword->confidence map via ``_define_capabilities``
    and implement ``execute``; the coordinator uses ``can_handle`` to score
    how well an agent matches a given task.
    """

    def __init__(self, name: str, tools: List[Any], model: LiteLLMModel):
        self.name = name
        # Underlying smolagents runner used by _execute_through_agent.
        self.agent = CodeAgent(tools=tools, model=model)
        self.capabilities = self._define_capabilities()

    @abstractmethod
    def _define_capabilities(self) -> Dict[str, float]:
        """Define what this agent is capable of handling and with what confidence."""
        pass

    def can_handle(self, task: str) -> float:
        """Return a confidence score in [0, 1] for handling *task*."""
        task = task.lower()
        best = 0.0

        # Score each task token against every capability keyword.
        for token in task.split():
            for keyword, score in self.capabilities.items():
                if keyword not in task:
                    continue
                # Accept exact token matches, whole-word occurrences, or
                # prefix overlap in either direction (e.g. "hike"/"hiking").
                matched = (
                    keyword == token
                    or f" {keyword} " in f" {task} "
                    or keyword.startswith(token)
                    or token.startswith(keyword)
                )
                if matched and score > best:
                    best = score

        return best

    @abstractmethod
    def execute(self, task: str) -> str:
        """Execute the task and return the result."""
        pass

    def _execute_through_agent(self, task: str) -> str:
        """Run *task* on the wrapped CodeAgent and normalize the output to str."""
        try:
            outcome = self.agent.run(task)
            # Some tools hand back {'result': ...}; unwrap that shape first.
            if isinstance(outcome, dict) and 'result' in outcome:
                return outcome['result']
            return outcome if isinstance(outcome, str) else str(outcome)
        except Exception as e:
            return f"β Error executing task: {str(e)}"
agents/coordinator.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Coordinator agent that manages and routes queries to specialized agents.
|
| 3 |
+
"""
|
| 4 |
+
import re
|
| 5 |
+
from typing import List, Dict, Any
|
| 6 |
+
from smolagents import LiteLLMModel
|
| 7 |
+
from .base_agent import BaseAgent
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class CoordinatorAgent:
    """Coordinator that decomposes queries and routes subtasks to specialized agents.

    Routing is keyword-driven: the query is scanned for category keywords,
    rewritten into explicit subtasks, and each subtask is assigned to the
    most confident specialized agent, with the web agent as a catch-all.
    """

    def __init__(self, specialized_agents: List[BaseAgent], model: LiteLLMModel):
        self.agents = specialized_agents
        self.model = model

    @staticmethod
    def _has_keyword(text: str, keyword: str) -> bool:
        """True if *keyword* occurs in *text* as a whole word/phrase.

        Fixes a false-positive bug in the previous plain substring test:
        e.g. "eat" inside "Seattle" used to trigger the restaurant category
        for any Seattle query.
        """
        return re.search(rf"\b{re.escape(keyword)}\b", text) is not None

    def _decompose_query(self, query: str) -> List[str]:
        """Decompose a complex query into subtasks."""
        if not query.strip():
            return ["Please provide a query to process"]

        query_lower = query.lower()
        tasks = []

        # Extract a location from "in/at/near/around/close to <place>".
        location_pattern = r"(?:in|at|near|around|close to)\s+([a-zA-Z\s,]+)(?:\.|$|\s)"
        location_match = re.search(location_pattern, query_lower)
        location = location_match.group(1).strip() if location_match else ""

        # Direct task identification patterns (category -> trigger keywords).
        patterns = {
            "sentiment": ["sentiment", "feeling", "opinion", "review", "analyze", "how is", "what do you think"],
            "places": ["hotel", "hotels", "stay", "accommodation", "accommodations", "lodging", "motel", "resort"],
            "restaurants": ["restaurant", "restaurants", "food", "dining", "eat", "dinner", "lunch", "breakfast", "cafe"],
            "hiking": ["hike", "hikes", "hiking", "trail", "trails", "trek", "trekking", "outdoor", "mountain", "walk", "walking", "nature"],
            "web_search": ["search", "web search", "find", "lookup", "google", "bing", "weather", "news", "current", "latest", "today", "information", "what is", "definition", "meaning", "stock", "price", "market", "finance", "recipe", "movie", "film", "tv show"]
        }

        # Check for direct category matches (whole-word, see _has_keyword).
        found_match = False
        for category, keywords in patterns.items():
            if any(self._has_keyword(query_lower, keyword) for keyword in keywords):
                found_match = True
                if category == "sentiment":
                    tasks.append(query)  # sentiment agent needs the raw text
                elif category == "places" and location:
                    tasks.append(f"Find hotels in {location}")
                elif category == "restaurants" and location:
                    tasks.append(f"Find restaurants in {location}")
                elif category == "hiking" and location:
                    tasks.append(f"Find hiking trails in {location}")
                elif category == "web_search":
                    tasks.append(query)  # web agent gets the raw query
                elif category == "places":
                    tasks.append("Find hotels " + query)
                elif category == "restaurants":
                    tasks.append("Find restaurants " + query)
                elif category == "hiking":
                    tasks.append("Find hiking trails " + query)

        # Location with no matched category: fan out to all location searches.
        if not found_match and location:
            tasks.extend([
                f"Find hotels in {location}",
                f"Find restaurants in {location}",
                f"Find hiking trails in {location}"
            ])

        # Still nothing: let the web agent take a shot at the raw query.
        if not tasks:
            tasks.append(query)

        return tasks

    def _assign_tasks(self, tasks: List[str]) -> Dict[str, List[str]]:
        """Assign tasks to specialized agents based on their capabilities."""
        assignments = {agent.name: [] for agent in self.agents}
        # Robustness: the web agent is the declared fallback; ensure the key
        # exists even if no web agent was registered (avoids a KeyError below).
        assignments.setdefault("web_agent", [])
        unassigned_tasks = []

        for task in tasks:
            best_agent = None
            best_confidence = 0.1  # low threshold so weak matches still route

            # Prefer the non-web specialists; the web agent is the fallback.
            for agent in self.agents:
                if agent.name == "web_agent":
                    continue
                confidence = agent.can_handle(task)
                if confidence > best_confidence:
                    best_confidence = confidence
                    best_agent = agent

            if best_agent is not None:
                assignments[best_agent.name].append(task)
            else:
                unassigned_tasks.append(task)

        # Route all unassigned tasks to the web agent as fallback.
        for task in unassigned_tasks:
            assignments["web_agent"].append(task)

        return assignments

    def process_query(self, query: str) -> str:
        """Process a complex query by coordinating multiple specialized agents."""
        try:
            # Decompose the query into subtasks.
            tasks = self._decompose_query(query)
            if not tasks:
                return "β I couldn't understand how to help with that request. Please try rephrasing it."

            # Assign tasks to specialized agents.
            assignments = self._assign_tasks(tasks)

            # Execute tasks and collect results.
            results = []
            has_meaningful_assignment = False

            for agent in self.agents:
                for task in assignments[agent.name]:
                    try:
                        if agent.name == "web_agent":
                            # Web agent is the fallback: always execute.
                            has_meaningful_assignment = True
                            result = agent.execute(task)
                            if result and len(result.strip()) > 20:
                                results.append(result)
                        else:
                            # Other agents must show some confidence first.
                            confidence = agent.can_handle(task)
                            if confidence > 0.1:
                                has_meaningful_assignment = True
                                result = agent.execute(task)
                                # Skip trivially short confirmations.
                                if result and len(result.strip()) > 20:
                                    results.append(result)
                    except Exception as e:
                        results.append(f"β Error processing '{task}': {str(e)}")

            # No meaningful assignment or no results: give usage guidance.
            if not has_meaningful_assignment or not results:
                return """
β **Unable to Process Query**

I couldn't find any agents to handle your request properly. This shouldn't happen with the web agent as fallback.

π― **Try these specific formats:**
β’ π¨ **Hotels:** "Find hotels in [city]"
β’ π½οΈ **Restaurants:** "Find restaurants in [city]"
β’ ποΈ **Hiking:** "Find hiking trails near [location]"
β’ π **Sentiment:** "Analyze sentiment of: [text]"
β’ π **Web Search:** "Search for [anything]" or just ask any question

π‘ **Example:** "What's the weather in Tokyo?" or "Find hotels in Paris"
""".strip()

            # A single result is returned as-is.
            if len(results) == 1:
                return results[0]

            # Multiple results are combined with separators.
            combined_output = "π― **Here's what I found for you:**\n\n"
            for i, result in enumerate(results, 1):
                combined_output += f"**Result #{i}:**\n"
                combined_output += result + "\n\n"
                if i < len(results):
                    combined_output += "β" * 60 + "\n\n"

            return combined_output

        except Exception as e:
            return f"β Error processing query: {str(e)}"
|
agents/hiking_agent.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Hiking agent for outdoor activities and trail searches.
|
| 3 |
+
"""
|
| 4 |
+
import re
|
| 5 |
+
from typing import Dict
|
| 6 |
+
from .base_agent import BaseAgent
|
| 7 |
+
from services.hiking_service import HikingRecommendationServer
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class HikingAgent(BaseAgent):
    """Specialized agent for finding hiking trails and outdoor activities."""

    def _define_capabilities(self) -> Dict[str, float]:
        """Keyword -> confidence map for hiking/outdoor queries."""
        by_score = {
            0.9: ("hike", "hikes", "hiking", "trail", "trails"),
            0.8: ("trek", "trekking"),
            0.7: ("outdoor", "mountain"),
            0.6: ("walk", "walking", "nature"),
        }
        return {kw: score for score, kws in by_score.items() for kw in kws}

    def execute(self, task: str) -> str:
        """Search hiking trails matching the location/difficulty/distance in *task*."""
        try:
            lowered = task.lower()

            # Location from "in/at/near/around/close to <place>"; fall back
            # to the raw task text when no such phrase is present.
            loc_match = re.search(
                r"(?:in|at|near|around|close to)\s+([a-zA-Z\s,]+)(?:\.|$|\s)",
                lowered,
            )
            location = loc_match.group(1).strip() if loc_match else task

            # Service backing the trail search.
            hiking_server = HikingRecommendationServer()

            # Difficulty keywords, first match wins.
            difficulty = "All"
            if "easy" in lowered:
                difficulty = "Easy"
            elif "moderate" in lowered:
                difficulty = "Moderate"
            elif "hard" in lowered or "difficult" in lowered:
                # NOTE(review): "very hard" also lands here because "hard"
                # matches first; the branch below only fires for "extreme".
                difficulty = "Hard"
            elif "very hard" in lowered or "extreme" in lowered:
                difficulty = "Very Hard"

            # Optional "within N miles" radius; default 50 miles.
            dist_match = re.search(r"within\s+(\d+)\s*(?:mile|miles|mi)", lowered)
            max_distance = int(dist_match.group(1)) if dist_match else 50

            results = hiking_server.get_hiking_recommendations_structured(location, max_distance, difficulty)

            if not results["success"]:
                return results["message"]

            # Assemble the formatted report.
            stats = results["stats"]
            parts = [
                f"ποΈ **Hiking Trails near {location.title()}** ποΈ\n",
                f"(Within {max_distance} miles, Difficulty: {difficulty})\n\n",
                "π **Quick Stats:**\n",
                f"β’ Total Trails: {stats['total_hikes']}\n",
                f"β’ Average Distance: {stats['avg_distance']} miles\n",
                f"β’ Average Elevation: {stats['avg_elevation']} ft\n\n",
            ]
            for i, hike in enumerate(results['hikes'][:5], 1):
                parts.append(f"**#{i} {hike['name']}**\n")
                parts.append(f"π― **Difficulty:** {hike['difficulty_level']}\n")
                parts.append(f"π **Distance:** {hike['distance']} miles\n")
                parts.append(f"β°οΈ **Elevation:** {hike['elevation_gain']} ft\n")
                parts.append(f"β **Rating:** {hike['rating']} ({hike['reviews']} reviews)\n")
                parts.append("β" * 40 + "\n\n")
            parts.append("π₯Ύ *Happy hiking!* π₯Ύ")
            return "".join(parts)

        except Exception as e:
            return f"β Error searching for hiking trails: {str(e)}"
agents/place_agent.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Place agent for hotel and accommodation searches.
|
| 3 |
+
"""
|
| 4 |
+
import re
|
| 5 |
+
from typing import Dict
|
| 6 |
+
from .base_agent import BaseAgent
|
| 7 |
+
from services.place_service import place_service
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class PlaceAgent(BaseAgent):
    """Specialized agent for finding hotels and accommodations."""

    def _define_capabilities(self) -> Dict[str, float]:
        """Keyword -> confidence map for lodging queries."""
        return {
            "hotel": 0.9,
            "hotels": 0.9,
            "accommodation": 0.9,
            "accommodations": 0.9,
            "place to stay": 0.9,
            "places to stay": 0.9,
            "stay": 0.8,
            "lodging": 0.8,
            "motel": 0.8,
            "resort": 0.8,
        }

    def execute(self, task: str) -> str:
        """Search for lodging near the location mentioned in *task*."""
        try:
            cleaned = task.strip()
            lowered = cleaned.lower()

            # Location from "in/at/near/..." phrase, else the raw task text.
            loc_match = re.search(
                r"(?:in|at|near|around|close to)\s+([a-zA-Z\s,]+)(?:\.|$|\s)",
                lowered,
            )
            location = loc_match.group(1).strip() if loc_match else cleaned

            # Strip common search prefixes so only the place name remains.
            for prefix in ('hotels in ', 'hotel in ', 'find hotels in '):
                location = location.replace(prefix, '')
            location = location.strip()

            # Optional "within N miles" radius; service default when absent.
            dist_match = re.search(r"within\s+(\d+)\s*(?:mile|miles|mi)", lowered)
            max_distance = int(dist_match.group(1)) if dist_match else None

            results = place_service.search_places(location, max_distance)

            if "error" in results:
                return f"β {results['error']}"

            # Assemble the formatted report.
            parts = [
                f"π¨ **Hotels & Accommodations in {results['location'].title()}** π¨\n",
                f"(Search radius: {results['search_radius']})\n\n",
                "π **Quick Stats:**\n",
                f"β’ Total Found: {results['total_found']} places\n",
                f"β’ Showing Top: {len(results['top_places'])} results\n\n",
            ]
            for i, place in enumerate(results['top_places'], 1):
                parts.append(f"**#{i} {place['name']}** β\n")
                parts.append(f"π¨ **Type:** {place['type']}\n")
                parts.append(f"π **Address:** {place['address']}\n")
                parts.append(f"πΆ **Distance:** {place['distance']}\n")
                if place['rating'] != 'Not rated':
                    parts.append(f"β **Rating:** {place['rating']}\n")
                if place['description'] != 'No description available':
                    parts.append(f"βΉοΈ **About:** {place['description'][:150]}...\n")
                parts.append("\n" + "β" * 50 + "\n\n")
            parts.append("π *Happy travels!* π")
            return "".join(parts)

        except Exception as e:
            return f"β Error searching for places: {str(e)}"
agents/restaurant_agent.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Restaurant agent for dining and food establishment searches.
|
| 3 |
+
"""
|
| 4 |
+
from typing import Dict
|
| 5 |
+
from .base_agent import BaseAgent
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class RestaurantAgent(BaseAgent):
    """Specialized agent for finding restaurants and dining options."""

    def _define_capabilities(self) -> Dict[str, float]:
        """Keyword -> confidence map for dining-related queries."""
        return {
            "restaurant": 0.9,
            "restaurants": 0.9,
            "food": 0.8,
            "dining": 0.8,
            "eat": 0.7,
            "dinner": 0.7,
            "lunch": 0.7,
            "breakfast": 0.7,
            "cafe": 0.7,
            "bar": 0.7,
        }

    def execute(self, task: str) -> str:
        """Execute a restaurant search via the wrapped CodeAgent."""
        try:
            cleaned = task.strip()

            # Ensure a dining keyword is present so downstream tools match.
            dining_words = ("restaurant", "food", "dining", "eat")
            if not any(word in cleaned.lower() for word in dining_words):
                cleaned = f"Find restaurants in {cleaned}"

            # No dedicated service here: delegate to the base agent runner.
            return self._execute_through_agent(cleaned)

        except Exception as e:
            return f"β Error searching for restaurants: {str(e)}"
agents/sentiment_agent.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Sentiment analysis agent for analyzing text emotions and opinions.
|
| 3 |
+
"""
|
| 4 |
+
from typing import Dict
|
| 5 |
+
from textblob import TextBlob
|
| 6 |
+
from .base_agent import BaseAgent
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class SentimentAgent(BaseAgent):
    """Specialized agent for sentiment analysis and text emotion detection."""

    def _define_capabilities(self) -> Dict[str, float]:
        """Keyword -> confidence map for sentiment/emotion queries."""
        return {
            "sentiment": 0.9,
            "emotion": 0.8,
            "review": 0.8,
            "feeling": 0.7,
            "opinion": 0.7,
            "text analysis": 0.7,
            "analyze": 0.6,
        }

    def execute(self, task: str) -> str:
        """Run TextBlob sentiment analysis over the text embedded in *task*."""
        try:
            # Pull the target text out of the request phrasing.
            text_to_analyze = self._extract_text_from_task(task.strip())

            # TextBlob yields polarity in [-1, 1] and subjectivity in [0, 1].
            sentiment = TextBlob(text_to_analyze).sentiment
            polarity = sentiment.polarity
            subjectivity = sentiment.subjectivity

            # Bucket polarity with a small neutral dead-zone around 0.
            if polarity > 0.1:
                sentiment_label = "Positive π"
            elif polarity < -0.1:
                sentiment_label = "Negative π"
            else:
                sentiment_label = "Neutral π"

            # Above 0.5 we call the text opinion-based rather than factual.
            subjectivity_label = (
                "Subjective (Opinion-based)" if subjectivity > 0.5
                else "Objective (Fact-based)"
            )

            result = f"""
π **Sentiment Analysis Result** π

π **Text Analyzed:** "{text_to_analyze}"

π **Analysis Results:**
β’ π **Sentiment:** {sentiment_label}
β’ π **Polarity Score:** {polarity:.2f} (Range: -1 to +1)
β’ π€ **Subjectivity:** {subjectivity_label}
β’ π **Subjectivity Score:** {subjectivity:.2f} (Range: 0 to 1)

π‘ **Interpretation:**
β’ **Polarity** indicates how positive or negative the text is
β’ **Subjectivity** shows how much personal opinion vs factual information the text contains

π― **Summary:** This text expresses a {sentiment_label.lower()} sentiment with {subjectivity_label.lower()} tone.
"""

            return result.strip()

        except Exception as e:
            return f"β Error analyzing sentiment: {str(e)}"

    def _extract_text_from_task(self, task: str) -> str:
        """Strip a leading "analyze sentiment of:"-style prefix (and quotes)."""
        task_lower = task.lower()

        # Recognized request prefixes; everything after the colon is the text.
        markers = (
            "sentiment of:",
            "analyze the sentiment of:",
            "what's the sentiment of:",
            "sentiment analysis of:",
            "analyze:",
            "sentiment:",
        )

        for marker in markers:
            idx = task_lower.find(marker)
            if idx == -1:
                continue
            extracted = task[idx + len(marker):].strip()
            # Peel one layer of surrounding quotes, if any.
            if extracted.startswith('"') and extracted.endswith('"'):
                extracted = extracted[1:-1]
            if extracted.startswith("'") and extracted.endswith("'"):
                extracted = extracted[1:-1]
            return extracted

        # No marker found: analyze the task text as-is.
        return task
agents/web_agent.py
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Web search agent for handling general web search and information lookup queries.
|
| 3 |
+
"""
|
| 4 |
+
from typing import Dict
|
| 5 |
+
from .base_agent import BaseAgent
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class WebAgent(BaseAgent):
|
| 9 |
+
"""Specialized agent for web search and general information lookup."""
|
| 10 |
+
|
| 11 |
+
def _define_capabilities(self) -> Dict[str, float]:
    """Declare keyword-to-confidence scores used to route web queries here.

    Keywords are grouped by how strongly they indicate a web-search
    intent; the merged mapping preserves the strongest-first ordering.
    """
    strong = {"search": 0.9, "web search": 0.9}
    high = {keyword: 0.8 for keyword in (
        "find", "lookup", "google", "bing", "weather", "news",
    )}
    medium = {keyword: 0.7 for keyword in (
        "current", "latest", "today", "information", "what is",
        "definition", "meaning", "stock", "price", "market", "finance",
    )}
    light = {keyword: 0.6 for keyword in (
        "recipe", "movie", "film", "tv show",
    )}
    return {**strong, **high, **medium, **light}
|
| 38 |
+
|
| 39 |
+
def execute(self, task: str) -> str:
    """Route a web-search task to the most relevant specialized handler.

    The first keyword group that matches the lowered query decides the
    handler; anything unmatched falls back to a general web search.
    """
    try:
        lowered = task.lower()
        # Ordered dispatch: earlier groups take priority, mirroring the
        # routing precedence of the capability scores.
        dispatch = [
            (('weather', 'temperature', 'forecast'), self._search_weather_info),
            (('news', 'current', 'latest', 'today'), self._search_news_info),
            (('stock', 'price', 'market', 'finance'), self._search_financial_info),
            (('definition', 'meaning', 'what is', 'define'), self._search_definition_info),
            (('recipe', 'cooking', 'how to cook'), self._search_recipe_info),
            (('movie', 'film', 'tv show', 'entertainment'), self._search_entertainment_info),
        ]
        for keywords, handler in dispatch:
            if any(keyword in lowered for keyword in keywords):
                return handler(task)
        return self._general_web_search(task)

    except Exception as e:
        return f"β Error performing web search: {str(e)}"
|
| 62 |
+
|
| 63 |
+
def _search_weather_info(self, query: str) -> str:
    """Return curated guidance pointing the user at weather sources.

    Note: this is static canned text; no live web request is made.
    """
    return f"""
π€οΈ **Weather Search Results** π€οΈ

π **Query:** {query}

π **Weather Information:**
For real-time weather data, I recommend checking:

**π‘οΈ Primary Weather Sources:**
β’ **Weather.com** - Comprehensive forecasts and radar
β’ **AccuWeather** - Detailed hourly and daily forecasts
β’ **National Weather Service** - Official government weather data
β’ **Weather Underground** - Hyperlocal weather conditions

**π± Quick Access:**
β’ Search "weather [your location]" on Google
β’ Use your phone's built-in weather app
β’ Ask voice assistants like Siri, Alexa, or Google Assistant

**β‘ Real-time Features:**
β’ Current temperature and conditions
β’ Hourly and 10-day forecasts
β’ Radar and satellite imagery
β’ Weather alerts and warnings

*For the most accurate and up-to-date weather information, please visit one of the recommended weather services above.*
""".strip()
|
| 92 |
+
|
| 93 |
+
def _search_news_info(self, query: str) -> str:
    """Return curated guidance pointing the user at news sources.

    Note: this is static canned text; no live web request is made.
    """
    return f"""
π° **News Search Results** π°

π **Query:** {query}

π **Current News Sources:**
For the latest news and current events:

**π Major News Outlets:**
β’ **BBC News** - International news and analysis
β’ **CNN** - Breaking news and politics
β’ **Reuters** - Global news and business
β’ **Associated Press (AP)** - Reliable news coverage
β’ **NPR** - In-depth reporting and analysis

**π News Aggregators:**
β’ **Google News** - Aggregated news from multiple sources
β’ **AllSides** - News from different political perspectives
β’ **Ground News** - Source diversity and bias ratings

**π± Quick Access:**
β’ Search your topic on Google News
β’ Use news apps on your mobile device
β’ Follow reputable news sources on social media

**β‘ Real-time Updates:**
For breaking news and live updates, check the websites or apps of major news organizations listed above.

*Always verify information from multiple reputable sources for accuracy.*
""".strip()
|
| 125 |
+
|
| 126 |
+
def _search_financial_info(self, query: str) -> str:
    """Return curated guidance pointing the user at financial data sources.

    Note: this is static canned text; no live web request is made.
    """
    return f"""
π **Financial Search Results** π

π **Query:** {query}

π **Financial Information Sources:**
For stock prices, market data, and financial news:

**πΉ Primary Financial Platforms:**
β’ **Yahoo Finance** - Free stock quotes and market data
β’ **Google Finance** - Quick stock lookups and charts
β’ **Bloomberg** - Professional financial news and data
β’ **MarketWatch** - Market news and stock analysis
β’ **Investing.com** - Global financial markets data

**π Investment Platforms:**
β’ **E*TRADE, TD Ameritrade, Fidelity** - Full-service trading
β’ **Robinhood, Webull** - Commission-free trading apps
β’ **Morningstar** - Investment research and analysis

**π± Quick Access:**
β’ Search "[stock symbol] stock price" on Google
β’ Use financial apps for real-time quotes
β’ Set up price alerts for stocks you're tracking

**β‘ Real-time Data:**
Most platforms offer real-time or delayed quotes, market charts, and financial news.

*Always consult with financial advisors for investment decisions. Past performance doesn't guarantee future results.*
""".strip()
|
| 158 |
+
|
| 159 |
+
def _search_definition_info(self, query: str) -> str:
    """Return curated guidance for definition/meaning lookups.

    The queried term is recovered by stripping common request prefixes;
    the rest of the response is static canned text (no live web request).
    """
    # Extract the term being defined by stripping known request prefixes.
    term = query.lower().replace('what is ', '').replace('define ', '').replace('definition of ', '').replace('meaning of ', '').strip()

    return f"""
π **Definition Search Results** π

π **Query:** {query}
π **Term:** {term.title()}

**π Dictionary Sources:**
β’ **Merriam-Webster** - Authoritative dictionary definitions
β’ **Oxford English Dictionary** - Comprehensive word origins and meanings
β’ **Cambridge Dictionary** - Clear definitions with examples
β’ **Dictionary.com** - Quick definitions and synonyms

**π§ Knowledge Sources:**
β’ **Wikipedia** - Detailed explanations and background
β’ **Britannica** - Scholarly articles and definitions
β’ **Khan Academy** - Educational explanations
β’ **Simple Wikipedia** - Easy-to-understand explanations

**π± Quick Access:**
β’ Search "define [term]" on Google
β’ Use dictionary apps on your phone
β’ Ask voice assistants for quick definitions

**π‘ Learning Tips:**
β’ Look up related terms and synonyms
β’ Check pronunciation guides
β’ Read example sentences to understand usage

*For academic or technical terms, consider specialized dictionaries or educational resources in that field.*
""".strip()
|
| 194 |
+
|
| 195 |
+
def _search_recipe_info(self, query: str) -> str:
    """Return curated guidance pointing the user at recipe sources.

    Note: this is static canned text; no live web request is made.
    """
    return f"""
π¨βπ³ **Recipe Search Results** π¨βπ³

π **Query:** {query}

π **Popular Recipe Sources:**
For cooking instructions and recipe ideas:

**π½οΈ Top Recipe Websites:**
β’ **AllRecipes** - User-submitted recipes with ratings
β’ **Food Network** - Professional chef recipes and tips
β’ **Tasty (BuzzFeed)** - Quick and easy recipe videos
β’ **Serious Eats** - Science-based cooking techniques
β’ **BBC Good Food** - Tested recipes with photos

**πΊ Video Cooking:**
β’ **YouTube Cooking Channels** - Step-by-step video guides
β’ **Food Network shows** - Professional cooking demonstrations
β’ **TikTok/Instagram** - Quick recipe videos and tips

**π± Recipe Apps:**
β’ **Yummly** - Personalized recipe recommendations
β’ **Paprika** - Recipe organization and meal planning
β’ **BigOven** - Recipe sharing and grocery lists

**π Shopping Integration:**
Many recipe sites offer grocery list generation and ingredient delivery options.

*Always read reviews and check cooking times. Adjust recipes based on your taste preferences and dietary needs.*
""".strip()
|
| 227 |
+
|
| 228 |
+
def _search_entertainment_info(self, query: str) -> str:
    """Return curated guidance pointing the user at entertainment sources.

    Note: this is static canned text; no live web request is made.
    """
    return f"""
π¬ **Entertainment Search Results** π¬

π **Query:** {query}

π **Entertainment Information Sources:**

**π Movie & TV Databases:**
β’ **IMDb** - Comprehensive movie and TV show database
β’ **Rotten Tomatoes** - Movie reviews and ratings
β’ **Metacritic** - Professional critic scores
β’ **The Movie Database (TMDb)** - Open movie database

**πΊ Streaming Platforms:**
β’ **Netflix, Hulu, Amazon Prime** - Original and licensed content
β’ **Disney+, HBO Max, Apple TV+** - Premium streaming services
β’ **YouTube, Tubi, Crackle** - Free streaming options

**πͺ Entertainment News:**
β’ **Entertainment Weekly** - Celebrity news and reviews
β’ **Variety** - Industry news and analysis
β’ **The Hollywood Reporter** - Film and TV industry updates
β’ **E! Online** - Celebrity gossip and entertainment news

**π± Quick Access:**
β’ Search movie/show titles on Google for quick info
β’ Use streaming platform apps to find where to watch
β’ Check social media for trending entertainment topics

*For the latest releases and showtimes, check your local theater websites or movie apps like Fandango.*
""".strip()
|
| 261 |
+
|
| 262 |
+
def _general_web_search(self, query: str) -> str:
    """Fallback handler for queries that match no specific category.

    Note: this is static canned text; no live web request is made.
    """
    return f"""
π **General Web Search Results** π

π **Query:** {query}

π **Recommended Search Approach:**
For comprehensive information on your topic:

**π Primary Search Engines:**
β’ **Google** - Most comprehensive web search
β’ **Bing** - Microsoft's search engine with unique features
β’ **DuckDuckGo** - Privacy-focused search without tracking
β’ **Yahoo Search** - Alternative search with news integration

**π Knowledge Sources:**
β’ **Wikipedia** - General encyclopedia articles
β’ **Reddit** - Community discussions and opinions
β’ **Quora** - Question and answer platform
β’ **Stack Overflow** - Technical questions and answers

**π― Search Tips:**
β’ Use specific keywords related to your topic
β’ Try different phrasings of your question
β’ Use quotation marks for exact phrase searches
β’ Add site:specific-site.com to search within a website

**π± Search Tools:**
β’ Google's specialized searches (Images, Scholar, News)
β’ Use advanced search filters for better results
β’ Try voice search on mobile devices

**π‘ Pro Tip:** For the most current and comprehensive information about "{query}", I recommend using Google or another major search engine directly.

*Search engines are updated constantly with the latest information from across the web.*
""".strip()
|
app.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Main entry point for the MCP Agent Hackathon system.
|
| 4 |
+
Supports multiple modes: server, client, and agents.
|
| 5 |
+
"""
|
| 6 |
+
import sys
|
| 7 |
+
import os
|
| 8 |
+
import argparse
|
| 9 |
+
import asyncio
|
| 10 |
+
import logging
|
| 11 |
+
|
| 12 |
+
# Add the current directory to Python path for imports
|
| 13 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 14 |
+
|
| 15 |
+
from utils.api_config import api_config
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def run_client():
    """Run the MCP client with agent system.

    The client module is imported lazily so its dependencies are only
    loaded when this mode is actually started.
    """
    print("π€ Starting MCP Client...")
    from client import main as client_main
    client_main()
|
| 23 |
+
|
| 24 |
+
def run_agents():
    """Run the modular agent system directly as an interactive REPL.

    Builds the multi-agent system, prints example queries, then reads
    user queries from stdin until 'quit'/'exit'/'q' or Ctrl+C.
    """
    print("π§ Starting Modular Agent System...")
    from agents import create_agent_system

    # Create the agent system
    agent_system = create_agent_system()
    coordinator = agent_system.get_coordinator()

    print("\nπ€ **Multi-Agent System Demo** π€")
    print(f"π Available Agents: {', '.join(agent_system.list_agents())}")
    print("=" * 60)

    # Interactive mode
    print("\n㪠**Interactive Mode** (type 'quit' to exit)")
    print("π― **Try these examples:**")
    print("β’ 'Find hotels in Paris'")
    print("β’ 'What's the weather like today?'")
    print("β’ 'Analyze sentiment: This is amazing!'")
    print("β’ 'Find hiking trails near Denver'")
    print("-" * 60)

    while True:
        try:
            query = input("\nπ£οΈ Your Query: ").strip()

            # Ignore empty input lines.
            if not query:
                continue

            if query.lower() in ['quit', 'exit', 'q']:
                print("π Goodbye!")
                break

            print(f"\nπ Processing: {query}")
            print("-" * 40)

            # Delegate routing and execution to the coordinator agent.
            result = coordinator.process_query(query)
            print(result)

            print("\n" + "=" * 60)

        except KeyboardInterrupt:
            # Ctrl+C exits the loop cleanly instead of showing a traceback.
            print("\n\nπ Goodbye!")
            break
        except Exception as e:
            # Keep the REPL alive on per-query failures.
            print(f"β Error: {str(e)}")
|
| 70 |
+
|
| 71 |
+
def main():
    """Main entry point with command line argument parsing.

    Parses CLI options, optionally reports API connectivity, then starts
    the MCP client (the only supported run mode).
    """
    parser = argparse.ArgumentParser(
        description="Modular Travel & Text Analysis System",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Fixed: the examples previously referenced a non-existent
        # "main.py"; this file is app.py. Also documents --check-apis.
        epilog="""
Examples:
  python app.py               # Start the MCP client with agent system
  python app.py --check-apis  # Verify API connectivity before starting
  python app.py --help        # Show this help message
"""
    )

    parser.add_argument(
        '--check-apis',
        action='store_true',
        help='Check API connectivity before starting'
    )

    args = parser.parse_args()

    # Check API status if requested
    if args.check_apis:
        api_config.print_status()
        print()

    # Always run in client mode
    run_client()
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
if __name__ == "__main__":
|
| 102 |
+
main()
|
requirements.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio[mcp]==5.33.0
|
| 2 |
+
textblob==0.19.0
|
| 3 |
+
smolagents[mcp]
|
| 4 |
+
smolagents[litellm]
|
| 5 |
+
huggingface-hub==0.32.4
|
| 6 |
+
beautifulsoup4==4.13.4
|
| 7 |
+
lxml==5.4.0
|
| 8 |
+
requests==2.32.3
|
| 9 |
+
mcp==1.9.0
|
| 10 |
+
aiofiles==24.1.0
|
| 11 |
+
httpx==0.28.1
|
services/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Services package for business logic and external API interactions
|
services/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (178 Bytes). View file
|
|
|
services/__pycache__/hiking_service.cpython-312.pyc
ADDED
|
Binary file (37.9 kB). View file
|
|
|
services/__pycache__/place_service.cpython-312.pyc
ADDED
|
Binary file (6.84 kB). View file
|
|
|
services/__pycache__/restaurant_service.cpython-312.pyc
ADDED
|
Binary file (9.34 kB). View file
|
|
|
services/hiking_service.py
ADDED
|
@@ -0,0 +1,880 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import requests
|
| 3 |
+
import math
|
| 4 |
+
from typing import List, Dict, Tuple
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
class HikingRecommendationServer:
|
| 8 |
+
def __init__(self):
    # Relative importance of each factor when scoring trail difficulty.
    # Weights sum to 1.0; elevation gain dominates, user ratings matter least.
    self.difficulty_weights = {
        'distance': 0.3,
        'elevation_gain': 0.4,
        'trail_type': 0.2,
        'user_ratings': 0.1
    }
|
| 15 |
+
|
| 16 |
+
def get_user_location(self, location_input: str) -> Tuple[float, float]:
    """Resolve a free-form location string to (latitude, longitude).

    Resolution order:
      1. Direct city/landmark substring match, longest names first, so
         e.g. "portland me" resolves to Portland, ME rather than the
         shorter key 'portland' (Portland, OR).
      2. Whole-word partial match on multi-word city names; tokens
         shorter than 3 characters (e.g. 'me') are ignored because they
         match almost any sentence ("trails near me").
      3. State-level centroid match.
      4. Geographic center of the contiguous US as a last resort.

    Args:
        location_input: Free-form location text from the user.

    Returns:
        A (latitude, longitude) tuple.
    """
    # Enhanced location coordinates mapping
    location_coords = {
        # West Coast
        'seattle': (47.6062, -122.3321),
        'portland': (45.5155, -122.6789),
        'san francisco': (37.7749, -122.4194),
        'los angeles': (34.0522, -118.2437),
        'san diego': (32.7157, -117.1611),
        'sacramento': (38.5816, -121.4944),
        'lake tahoe': (39.0968, -120.0324),
        'yosemite': (37.8651, -119.5383),

        # Mountain Region
        'denver': (39.7392, -104.9903),
        'boulder': (40.0150, -105.2705),
        'salt lake city': (40.7608, -111.8910),
        'phoenix': (33.4484, -112.0740),
        'flagstaff': (35.1983, -111.6513),
        'aspen': (39.1911, -106.8175),
        'yellowstone': (44.4280, -110.5885),

        # Southwest
        'santa fe': (35.6870, -105.9378),
        'sedona': (34.8697, -111.7600),
        'moab': (38.5733, -109.5498),

        # Northeast
        'new york': (40.7128, -74.0060),
        'boston': (42.3601, -71.0589),
        'portland me': (43.6591, -70.2568),
        'burlington': (44.4759, -73.2121),
        'acadia': (44.3386, -68.2733),

        # Southeast
        'asheville': (35.5951, -82.5515),
        'atlanta': (33.7490, -84.3880),
        'nashville': (36.1627, -86.7816),
        'great smoky mountains': (35.6131, -83.5532),

        # Midwest
        'chicago': (41.8781, -87.6298),
        'minneapolis': (44.9778, -93.2650),
        'madison': (43.0731, -89.4012),

        # Pacific Northwest
        'olympic national park': (47.8021, -123.6044),
        'mount rainier': (46.8523, -121.7603),
        'north cascades': (48.7718, -121.2985),
        'mount hood': (45.3737, -121.6956),
        'crater lake': (42.9446, -122.1090),
        'lynnwood': (47.8209, -122.3151)
    }

    # Clean and normalize the input
    location_lower = location_input.lower().strip()

    # Try direct match first, preferring longer (more specific) names so
    # that 'portland me' wins over 'portland'.
    for city in sorted(location_coords, key=len, reverse=True):
        if city in location_lower:
            return location_coords[city]

    # Try whole-word partial matches on multi-word city names. Tokens
    # shorter than 3 characters are skipped to avoid false positives.
    input_words = set(location_lower.split())
    for city, coords in location_coords.items():
        if any(part in input_words for part in city.split() if len(part) >= 3):
            return coords

    # If no match found, try to extract state information
    states = {
        'california': (36.7783, -119.4179),
        'oregon': (44.5720, -122.0709),
        'washington': (47.7511, -120.7401),
        'colorado': (39.5501, -105.7821),
        'utah': (39.3210, -111.0937),
        'arizona': (34.0489, -111.0937),
        'new mexico': (34.5199, -105.8701),
        'montana': (46.8797, -110.3626),
        'wyoming': (43.0760, -107.2903),
        'idaho': (44.0682, -114.7420),
        'nevada': (38.8026, -116.4194)
    }

    for state, coords in states.items():
        if state in location_lower:
            return coords

    # Default to a central US location if nothing found
    print(f"Warning: Could not find coordinates for '{location_input}', using default location")
    return (39.8283, -98.5795)  # Geographic center of the contiguous United States
|
| 107 |
+
|
| 108 |
+
def calculate_distance(self, lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle (haversine) distance between two points, in miles."""
    earth_radius_miles = 3959

    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    d_lat = math.radians(lat2 - lat1)
    d_lon = math.radians(lon2 - lon1)

    # Haversine term: sinΒ²(ΞΟ/2) + cos(Ο1)Β·cos(Ο2)Β·sinΒ²(ΞΞ»/2)
    a = (math.sin(d_lat / 2) ** 2 +
         math.cos(phi1) * math.cos(phi2) * math.sin(d_lon / 2) ** 2)
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

    return earth_radius_miles * central_angle
|
| 122 |
+
|
| 123 |
+
def get_real_hiking_data(self, user_lat: float, user_lon: float, radius: int = 50) -> List[Dict]:
    """Aggregate hiking trails near (user_lat, user_lon) from several sources.

    Each live source (OpenStreetMap, Recreation.gov, the hiking-project
    database) is tried in turn; a failure in one source is printed and does
    not stop the others. When every live source comes back empty, the
    curated regional database is used as a fallback.
    """
    sources = (
        ('OSM API', self.fetch_osm_trails),
        ('Recreation.gov API', self.fetch_recreation_gov_trails),
        ('Hiking Project API', self.fetch_hiking_project_trails),
    )

    collected: List[Dict] = []
    for label, fetch in sources:
        try:
            collected.extend(fetch(user_lat, user_lon, radius))
        except Exception as err:
            # Best-effort aggregation: log and keep trying the other sources.
            print(f"{label} error: {err}")

    if not collected:
        print("Using fallback curated data...")
        collected = self.get_curated_fallback_data(user_lat, user_lon, radius)

    return collected
def fetch_osm_trails(self, lat: float, lon: float, radius_miles: int) -> List[Dict]:
    """Fetch hiking trails from OpenStreetMap via the public Overpass API.

    Queries ways/relations tagged as hiking routes or foot paths within
    `radius_miles` of (lat, lon) and converts each element into this app's
    trail-record dict. Distance is computed from the returned geometry when
    available; rating/review counts are synthesized with np.random because
    OSM carries no rating data. Returns at most 15 trails; returns [] on any
    network or parsing failure (best-effort source).
    """
    # Convert radius from miles to meters (Overpass expects meters).
    radius_meters = radius_miles * 1609.34

    overpass_url = "http://overpass-api.de/api/interpreter"

    # Overpass query for hiking trails: foot-accessible paths, footways,
    # and hiking route ways/relations around the point. `out geom;` asks
    # Overpass to inline the node coordinates of each way.
    query = f"""
    [out:json][timeout:25];
    (
      way["highway"="path"]["foot"="yes"](around:{radius_meters},{lat},{lon});
      way["highway"="footway"](around:{radius_meters},{lat},{lon});
      way["route"="hiking"](around:{radius_meters},{lat},{lon});
      relation["route"="hiking"](around:{radius_meters},{lat},{lon});
    );
    out geom;
    """

    try:
        response = requests.get(overpass_url, params={'data': query}, timeout=30)
        response.raise_for_status()
        data = response.json()

        trails = []
        for element in data.get('elements', []):
            # Untagged elements (bare geometry members) carry no usable info.
            if 'tags' in element:
                tags = element['tags']

                # Extract trail information; unnamed trails get a coordinate label.
                name = tags.get('name', f"Trail near {lat:.3f}, {lon:.3f}")

                # Calculate approximate trail stats from the way geometry.
                trail_coords = []
                if element['type'] == 'way' and 'nodes' in element:
                    # For ways, we have geometry (thanks to `out geom;`).
                    if 'geometry' in element:
                        trail_coords = [(node['lat'], node['lon']) for node in element['geometry']]

                # Calculate trail distance; without geometry, fall back to a
                # random plausible length (2-8 miles).
                distance = self.calculate_trail_distance(trail_coords) if trail_coords else np.random.uniform(2, 8)

                # Estimate elevation gain based on terrain tags.
                elevation_gain = self.estimate_elevation_gain(tags, distance)

                # Determine trail type (loop vs out-and-back) from route tags.
                trail_type = self.determine_trail_type(tags)

                # Extract human-readable feature labels.
                features = self.extract_trail_features(tags)

                # Get trail center coordinates: centroid of the geometry, or a
                # random jitter around the query point when geometry is absent.
                if trail_coords:
                    trail_lat = np.mean([coord[0] for coord in trail_coords])
                    trail_lon = np.mean([coord[1] for coord in trail_coords])
                else:
                    trail_lat = lat + np.random.uniform(-0.1, 0.1)
                    trail_lon = lon + np.random.uniform(-0.1, 0.1)

                trail_data = {
                    'name': name,
                    'distance': round(distance, 1),
                    'elevation_gain': int(elevation_gain),
                    'trail_type': trail_type,
                    'rating': np.random.uniform(3.8, 4.8),  # OSM doesn't have ratings
                    'reviews': np.random.randint(50, 500),  # synthesized, see docstring
                    'features': features,
                    'latitude': trail_lat,
                    'longitude': trail_lon,
                    'distance_from_user': round(self.calculate_distance(lat, lon, trail_lat, trail_lon), 1),
                    'source': 'OpenStreetMap'
                }

                trails.append(trail_data)

        return trails[:15]  # Limit to 15 trails

    except Exception as e:
        # Best-effort source: any failure (HTTP, JSON, schema) yields [] so
        # the caller can try other providers.
        print(f"OSM fetch error: {e}")
        return []
def fetch_recreation_gov_trails(self, lat: float, lon: float, radius: int) -> List[Dict]:
    """Placeholder Recreation.gov (RIDB) lookup.

    The real RIDB activities endpoint requires an `X-API-Key`, so for the
    demo this returns a single representative national-park trail anchored
    near the caller's coordinates. Returns [] on any error.
    """
    # Real endpoint, kept for reference; a production call would look like:
    #   requests.get(f"{base_url}/activities",
    #                params={'latitude': lat, 'longitude': lon,
    #                        'radius': radius, 'activity': 'HIKING'},
    #                headers={'X-API-Key': 'YOUR_API_KEY'})
    base_url = "https://ridb.recreation.gov/api/v1"

    try:
        sample = {
            'name': 'National Park Trail System',
            'distance': 6.2,
            'elevation_gain': 1800,
            'trail_type': 'loop',
            'rating': 4.3,
            'reviews': 234,
            'features': ['national_park', 'scenic', 'maintained'],
            'latitude': lat + 0.15,
            'longitude': lon - 0.12,
            'source': 'Recreation.gov',
        }
        sample['distance_from_user'] = round(
            self.calculate_distance(lat, lon, sample['latitude'], sample['longitude']), 1
        )
        return [sample]

    except Exception as e:
        print(f"Recreation.gov fetch error: {e}")
        return []
def fetch_hiking_project_trails(self, lat: float, lon: float, radius: int) -> List[Dict]:
    """Placeholder for the discontinued Hiking Project API (REI Co-op).

    Kept to show the integration structure; could be swapped for an
    alternative such as the AllTrails API (paid) or TrailAPI. Returns a
    single representative regional trail near the caller's coordinates,
    or [] on any error.
    """
    try:
        sample = {
            'name': 'Regional Hiking Trail',
            'distance': 4.8,
            'elevation_gain': 1200,
            'trail_type': 'out_and_back',
            'rating': 4.1,
            'reviews': 156,
            'features': ['forest', 'moderate', 'dog_friendly'],
            'latitude': lat + 0.08,
            'longitude': lon + 0.15,
            'source': 'Hiking Database',
        }
        sample['distance_from_user'] = round(
            self.calculate_distance(lat, lon, sample['latitude'], sample['longitude']), 1
        )
        return [sample]

    except Exception as e:
        print(f"Hiking Project fetch error: {e}")
        return []
def get_curated_fallback_data(self, lat: float, lon: float, radius: int) -> List[Dict]:
    """Build trail records from the curated regional database.

    Each curated entry stores lat/lon *offsets* relative to the query
    point; entries whose resulting position lies beyond `radius` miles
    are dropped.
    """
    nearby = []
    for entry in self.get_regional_trail_data(lat, lon):
        t_lat = lat + entry['lat_offset']
        t_lon = lon + entry['lon_offset']
        separation = self.calculate_distance(lat, lon, t_lat, t_lon)

        if separation > radius:
            continue  # outside the search radius

        record = dict(entry)
        record['latitude'] = t_lat
        record['longitude'] = t_lon
        record['distance_from_user'] = round(separation, 1)
        record['source'] = 'Curated Database'
        nearby.append(record)

    return nearby
def get_regional_trail_data(self, lat: float, lon: float) -> List[Dict]:
    """Return curated real-trail records for the region containing (lat, lon).

    Regions are matched against inclusive lat/lon bounding boxes in priority
    order (first match wins, so overlapping boxes resolve deterministically);
    points outside every box get a generic default set. `lat_offset` /
    `lon_offset` are degrees relative to the query point.
    """
    # ((lat_min, lat_max, lon_min, lon_max), trails) -- checked in order.
    regions = [
        # Pacific Northwest (Washington/Oregon)
        ((45, 49, -125, -116), [
            {'name': 'Mount Pilchuck Trail', 'distance': 5.4, 'elevation_gain': 2300, 'trail_type': 'out_and_back',
             'rating': 4.5, 'reviews': 450, 'features': ['views', 'rocky', 'lookout', 'challenging'],
             'lat_offset': 0.3, 'lon_offset': -0.2,
             'description': 'Steep climb to a historic fire lookout with panoramic views of the Cascades.'},
            {'name': 'Rattlesnake Ledge', 'distance': 4.0, 'elevation_gain': 1175, 'trail_type': 'out_and_back',
             'rating': 4.2, 'reviews': 823, 'features': ['lake_views', 'crowded', 'family_friendly', 'beginner_friendly'],
             'lat_offset': 0.2, 'lon_offset': 0.3,
             'description': 'Popular trail offering stunning views of the Snoqualmie Valley and Cedar River watershed.'},
            {'name': 'Lake 22 Trail', 'distance': 5.4, 'elevation_gain': 1350, 'trail_type': 'out_and_back',
             'rating': 4.7, 'reviews': 320, 'features': ['alpine_lake', 'waterfall', 'old_growth', 'moderate'],
             'lat_offset': 0.4, 'lon_offset': -0.1,
             'description': 'Beautiful hike through old-growth forest to an alpine lake with mountain views.'},
        ]),
        # Colorado Rockies
        ((37, 41, -109, -102), [
            {'name': 'Emerald Lake Trail', 'distance': 3.2, 'elevation_gain': 650, 'trail_type': 'out_and_back',
             'rating': 4.6, 'reviews': 567, 'features': ['alpine_lake', 'easy', 'scenic', 'wildlife'],
             'lat_offset': 0.1, 'lon_offset': 0.2,
             'description': 'Stunning trail passing three lakes with views of Hallett Peak and Flattop Mountain.'},
            {'name': 'Quandary Peak', 'distance': 6.8, 'elevation_gain': 3450, 'trail_type': 'out_and_back',
             'rating': 4.4, 'reviews': 234, 'features': ['14er', 'challenging', 'summit', 'exposed'],
             'lat_offset': 0.3, 'lon_offset': -0.1,
             'description': 'Popular 14er with breathtaking views. One of the more accessible 14,000 ft peaks.'},
            {'name': 'Brainard Lake Trail', 'distance': 7.2, 'elevation_gain': 1850, 'trail_type': 'out_and_back',
             'rating': 4.3, 'reviews': 189, 'features': ['lake', 'moderate', 'forest', 'wildflowers'],
             'lat_offset': 0.2, 'lon_offset': 0.3,
             'description': 'Scenic trail through Indian Peaks Wilderness with mountain and lake views.'},
        ]),
        # California (Bay Area/Sierra)
        ((36, 39, -124, -119), [
            {'name': 'Mission Peak Loop', 'distance': 5.8, 'elevation_gain': 2100, 'trail_type': 'loop',
             'rating': 4.1, 'reviews': 892, 'features': ['views', 'challenging', 'popular', 'sunrise_sunset'],
             'lat_offset': 0.1, 'lon_offset': 0.1,
             'description': 'Challenging climb offering panoramic views of the entire Bay Area.'},
            {'name': 'Mount Tamalpais Matt Davis-Steep Ravine Loop', 'distance': 7.3, 'elevation_gain': 1500,
             'trail_type': 'loop', 'rating': 4.5, 'reviews': 345,
             'features': ['views', 'moderate', 'varied_terrain', 'redwoods', 'ocean_views'],
             'lat_offset': 0.2, 'lon_offset': -0.2,
             'description': 'Diverse loop featuring redwood forests, ocean views, and waterfalls.'},
            {'name': 'Dipsea Trail', 'distance': 9.5, 'elevation_gain': 2300, 'trail_type': 'point_to_point',
             'rating': 4.7, 'reviews': 276, 'features': ['coastal', 'challenging', 'historic', 'varied_terrain'],
             'lat_offset': 0.3, 'lon_offset': -0.1,
             'description': 'Historic trail from Mill Valley to Stinson Beach with stunning coastal views.'},
        ]),
        # Desert Southwest (Arizona/Utah)
        ((31, 37, -114, -109), [
            {'name': 'Cathedral Rock Trail', 'distance': 1.2, 'elevation_gain': 740, 'trail_type': 'out_and_back',
             'rating': 4.8, 'reviews': 789, 'features': ['scenic', 'rock_climbing', 'views', 'short'],
             'lat_offset': 0.1, 'lon_offset': 0.1,
             'description': 'Iconic Sedona trail with stunning red rock views and vortex site.'},
            {'name': 'Devils Bridge Trail', 'distance': 4.2, 'elevation_gain': 564, 'trail_type': 'out_and_back',
             'rating': 4.6, 'reviews': 1023, 'features': ['natural_arch', 'scenic', 'popular', 'photography'],
             'lat_offset': 0.15, 'lon_offset': -0.1,
             'description': 'Popular trail leading to the largest natural sandstone arch in Sedona.'},
            {'name': 'Delicate Arch Trail', 'distance': 3.2, 'elevation_gain': 480, 'trail_type': 'out_and_back',
             'rating': 4.9, 'reviews': 1456, 'features': ['iconic', 'scenic', 'desert', 'sunset'],
             'lat_offset': 0.2, 'lon_offset': 0.2,
             'description': "Iconic trail to Utah's most famous arch, best at sunset."},
        ]),
        # Pacific Coast (Oregon/Northern California) -- overlaps the PNW box
        # at lat 45-46 / lon -124..-122; PNW wins by being listed first,
        # preserving the original if/elif ordering.
        ((40, 46, -124, -122), [
            {'name': 'Multnomah Falls Trail', 'distance': 2.4, 'elevation_gain': 870, 'trail_type': 'out_and_back',
             'rating': 4.7, 'reviews': 1234, 'features': ['waterfall', 'scenic', 'popular', 'paved'],
             'lat_offset': 0.1, 'lon_offset': 0.1,
             'description': "Oregon's tallest waterfall with iconic Columbia River Gorge views."},
            {'name': 'Cape Lookout Trail', 'distance': 4.8, 'elevation_gain': 400, 'trail_type': 'out_and_back',
             'rating': 4.5, 'reviews': 567, 'features': ['coastal', 'whale_watching', 'scenic', 'moderate'],
             'lat_offset': 0.2, 'lon_offset': -0.2,
             'description': 'Coastal trail with whale watching opportunities and ocean views.'},
            {'name': 'Fern Canyon Loop', 'distance': 1.1, 'elevation_gain': 100, 'trail_type': 'loop',
             'rating': 4.8, 'reviews': 789, 'features': ['unique', 'movie_location', 'family_friendly', 'lush'],
             'lat_offset': 0.15, 'lon_offset': -0.15,
             'description': 'Famous for its 50-foot walls covered in ferns, featured in Jurassic Park.'},
        ]),
    ]

    for (lat_lo, lat_hi, lon_lo, lon_hi), trails in regions:
        if lat_lo <= lat <= lat_hi and lon_lo <= lon <= lon_hi:
            return trails

    # Default trails for any other region.
    return [
        {'name': 'Local Nature Preserve Trail', 'distance': 3.5, 'elevation_gain': 450, 'trail_type': 'loop',
         'rating': 4.2, 'reviews': 156, 'features': ['nature', 'moderate', 'local', 'family_friendly'],
         'lat_offset': 0.1, 'lon_offset': 0.1,
         'description': 'Pleasant loop trail through local wilderness with varied terrain.'},
        {'name': 'Riverside Trail System', 'distance': 5.2, 'elevation_gain': 200, 'trail_type': 'out_and_back',
         'rating': 4.4, 'reviews': 203, 'features': ['river', 'easy', 'scenic', 'accessible'],
         'lat_offset': 0.2, 'lon_offset': -0.1,
         'description': 'Scenic trail along the river with multiple access points and viewpoints.'},
        {'name': 'Highland Ridge Trail', 'distance': 4.8, 'elevation_gain': 800, 'trail_type': 'loop',
         'rating': 4.0, 'reviews': 89, 'features': ['views', 'moderate', 'wildlife', 'quiet'],
         'lat_offset': 0.15, 'lon_offset': 0.2,
         'description': 'Moderately challenging trail offering scenic views of the surrounding area.'},
    ]
def calculate_trail_distance(self, coords: List[Tuple[float, float]]) -> float:
    """Total length in miles of a polyline given as (lat, lon) vertices."""
    if len(coords) < 2:
        return 0  # a single point (or nothing) has no length

    # Sum the great-circle length of each consecutive segment.
    total = 0
    for (a_lat, a_lon), (b_lat, b_lon) in zip(coords, coords[1:]):
        total += self.calculate_distance(a_lat, a_lon, b_lat, b_lon)

    return total
def estimate_elevation_gain(self, tags: Dict, distance: float) -> int:
    """Estimate a trail's elevation gain (feet) from OSM tags and its length.

    OSM rarely records cumulative gain directly, so this is a heuristic:
    a 200 ft/mile baseline, inflated 1.5x for rough surfaces and scaled by
    any explicit `incline` percentage tag.

    Args:
        tags: OSM tag dict for the way/relation.
        distance: Trail length in miles.

    Returns:
        Estimated cumulative elevation gain in feet (truncated to int).
    """
    base_gain = distance * 200  # 200 ft per mile baseline

    # Rough surfaces usually indicate steeper, more rugged terrain.
    if any(tag in tags.get('surface', '') for tag in ['rock', 'gravel']):
        base_gain *= 1.5

    # An explicit incline tag (e.g. "10%") scales the estimate directly.
    if 'incline' in tags:
        try:
            incline = float(tags['incline'].replace('%', ''))
        except (ValueError, AttributeError):
            # Was a bare `except:` which also swallowed KeyboardInterrupt etc.
            # Narrowed: non-numeric values like "up"/"down" (ValueError) or a
            # non-string tag value (AttributeError) keep the baseline estimate.
            pass
        else:
            base_gain *= (1 + abs(incline) / 100)

    return int(base_gain)
def determine_trail_type(self, tags: Dict) -> str:
    """Classify a trail as 'loop' or 'out_and_back' from its OSM tags."""
    is_loop = (
        tags.get('route_master') == 'hiking'
        or 'circular' in tags.get('route', '')
    )
    # Without loop evidence, assume the more common out-and-back layout.
    return 'loop' if is_loop else 'out_and_back'
def extract_trail_features(self, tags: Dict) -> List[str]:
    """Derive a list of feature labels for a trail from its OSM tags."""
    found: List[str] = []

    natural = tags.get('natural')
    if natural in ('peak', 'volcano'):
        found.append('summit')
    elif natural in ('water', 'lake'):
        found.append('water_feature')

    if tags.get('tourism') == 'viewpoint':
        found.append('views')

    difficulty = tags.get('difficulty')
    if difficulty:
        found.append(difficulty)

    # Generic labels when nothing specific could be derived.
    return found or ['hiking', 'outdoor']
def calculate_difficulty_score(self, hike: Dict) -> float:
    """Score a hike's difficulty on a 0-100 scale (higher = more difficult).

    The score blends four capped factors:
      * distance        (up to 30 pts, 3 pts per mile)
      * elevation gain  (up to 40 pts, 1 pt per 100 ft)
      * trail layout    (up to 20 pts; point-to-point > loop > out-and-back)
      * feature labels  (0-10 pts; e.g. 'challenging' adds, 'easy' subtracts)
    """
    layout_points = {
        'loop': 10,
        'out_and_back': 5,
        'point_to_point': 15,
    }
    feature_points = {
        'rocky': 5,
        'steep': 8,
        'challenging': 10,
        'easy': -5,
        'family_friendly': -3,
        'moderate': 0,
    }

    dist_pts = min(hike['distance'] * 3, 30)
    elev_pts = min(hike['elevation_gain'] / 100, 40)
    layout_pts = layout_points.get(hike['trail_type'], 10)  # unknown layouts score mid-range

    # Sum feature adjustments, then clamp the contribution to [0, 10].
    raw_feature_pts = sum(feature_points.get(f, 0) for f in hike['features'])
    feature_pts = max(0, min(raw_feature_pts, 10))

    return min(dist_pts + elev_pts + layout_pts + feature_pts, 100)
def categorize_difficulty(self, score: float) -> str:
    """Map a 0-100 difficulty score onto a labeled tier."""
    # Ordered (exclusive_upper_bound, label) tiers; first bound the score
    # falls under wins, so the boundaries stay 25 / 50 / 75.
    tiers = (
        (25, "π’ Easy"),
        (50, "π‘ Moderate"),
        (75, "π  Hard"),
    )
    for upper, label in tiers:
        if score < upper:
            return label
    return "π΄ Very Hard"
def get_hiking_recommendations_structured(self, location: str, max_distance: int, difficulty_filter: str) -> Dict:
    """Build the structured payload the UI renders: scored hikes plus stats.

    Resolves `location` to coordinates, pulls nearby trails, attaches an AI
    difficulty score/level to each, optionally filters by difficulty tier,
    and returns a dict with `success`, `message`, `hikes` (max 12, easiest
    first), `stats`, and the resolved `user_location`.
    """
    user_lat, user_lon = self.get_user_location(location)
    hikes = self.get_real_hiking_data(user_lat, user_lon, max_distance)

    if not hikes:
        return {
            "success": False,
            "message": "β No hikes found in your area. Try expanding your search radius!",
            "hikes": [],
            "stats": {}
        }

    # Attach AI difficulty scoring to every candidate (mutates in place).
    for hike in hikes:
        hike['difficulty_score'] = self.calculate_difficulty_score(hike)
        hike['difficulty_level'] = self.categorize_difficulty(hike['difficulty_score'])

    # Optional tier filter ("All" keeps everything).
    if difficulty_filter != "All":
        difficulty_map = {
            "Easy": "π’ Easy",
            "Moderate": "π‘ Moderate",
            "Hard": "π  Hard",
            "Very Hard": "π΄ Very Hard"
        }
        wanted = difficulty_map[difficulty_filter]
        hikes = [h for h in hikes if h['difficulty_level'] == wanted]

    hikes.sort(key=lambda h: h['difficulty_score'])  # easiest first

    if not hikes:
        return {
            "success": False,
            "message": f"β No {difficulty_filter.lower()} hikes found in your area.",
            "hikes": [],
            "stats": {}
        }

    stats = {
        "total_hikes": len(hikes),
        "avg_distance": round(np.mean([h['distance'] for h in hikes]), 1),
        "avg_elevation": int(np.mean([h['elevation_gain'] for h in hikes])),
        "avg_rating": round(np.mean([h['rating'] for h in hikes]), 1),
        "difficulty_distribution": self.get_difficulty_distribution(hikes)
    }

    return {
        "success": True,
        "message": f"Found {len(hikes)} hikes near {location}",
        "hikes": hikes[:12],  # limit for better UX
        "stats": stats,
        "user_location": {"lat": user_lat, "lon": user_lon}
    }
def get_difficulty_distribution(self, hikes: List[Dict]) -> Dict:
    """Count how many hikes fall into each difficulty tier.

    All four tiers are always present in the result (zero when empty) so
    the UI can render a stable histogram.
    """
    levels = ["π’ Easy", "π‘ Moderate", "π  Hard", "π΄ Very Hard"]
    counts = {level: 0 for level in levels}
    for entry in hikes:
        counts[entry['difficulty_level']] += 1
    return counts
|
| 613 |
+
# Initialize the hiking server
# Module-level singleton: shared by every Gradio callback defined below.
hiking_server = HikingRecommendationServer()
| 616 |
+
|
| 617 |
+
# Create Gradio interface with rich UI components
def create_interface():
    """Build the Gradio Blocks app: search controls, stats, map, and trail cards.

    Fix vs. original: the event handlers listed ``stats_display`` twice in
    ``outputs`` (once for its HTML value, once for a visibility update),
    which is rejected/unreliable in Gradio. Both updates are now merged into
    a single ``gr.update(value=..., visible=True)`` return value, so each
    handler returns exactly one value per output component.
    """
    with gr.Blocks(title="AI Hiking Recommendation Server", theme=gr.themes.Soft(), css="""
        .trail-card {
            border: 1px solid #e1e5e9;
            border-radius: 12px;
            padding: 16px;
            margin: 8px 0;
            background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);
            box-shadow: 0 2px 8px rgba(0,0,0,0.1);
            transition: transform 0.2s ease;
        }
        .trail-card:hover {
            transform: translateY(-2px);
            box-shadow: 0 4px 16px rgba(0,0,0,0.15);
        }
        .difficulty-easy { border-left: 4px solid #28a745; }
        .difficulty-moderate { border-left: 4px solid #ffc107; }
        .difficulty-hard { border-left: 4px solid #fd7e14; }
        .difficulty-very-hard { border-left: 4px solid #dc3545; }
        .stats-grid {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 16px;
            margin: 16px 0;
        }
        .stat-card {
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            padding: 20px;
            border-radius: 12px;
            text-align: center;
        }
        .map-container {
            height: 400px;
            border-radius: 12px;
            overflow: hidden;
            box-shadow: 0 4px 16px rgba(0,0,0,0.1);
        }
    """) as app:

        gr.Markdown("""
        # ποΈ AI-Powered Hiking Recommendation MCP Server
        ### Hackathon Project: Smart Trail Discovery System

        Experience next-generation hiking recommendations powered by AI/ML algorithms and real-time data from multiple vetted sources.
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### π― Search Parameters")

                location_input = gr.Textbox(
                    label="π Your Location",
                    placeholder="Enter city or address (e.g., Seattle, Denver, San Francisco)",
                    value="Lynnwood, WA"
                )

                distance_slider = gr.Slider(
                    minimum=10,
                    maximum=100,
                    value=50,
                    step=10,
                    label="π Search Radius (miles)"
                )

                difficulty_dropdown = gr.Dropdown(
                    choices=["All", "Easy", "Moderate", "Hard", "Very Hard"],
                    value="All",
                    label="β‘ Difficulty Filter"
                )

                search_btn = gr.Button("π Find Perfect Hikes", variant="primary", size="lg")

                # Quick stats display (hidden until the first search runs).
                with gr.Group():
                    gr.Markdown("### π Quick Stats")
                    stats_display = gr.HTML(visible=False)

            with gr.Column(scale=2):
                gr.Markdown("### πΊοΈ Interactive Trail Map")
                map_display = gr.HTML(
                    value="""
                    <div class="map-container">
                        <div style="height: 100%; display: flex; align-items: center; justify-content: center; background: linear-gradient(135deg, #74b9ff 0%, #0984e3 100%); color: white; font-size: 18px;">
                            πΊοΈ Map will load after search
                        </div>
                    </div>
                    """
                )

        # Trail recommendations display
        gr.Markdown("### ποΈ AI-Powered Trail Recommendations")
        recommendations_display = gr.HTML()

        # State to store the last structured search result.
        current_data = gr.State({})

        def update_display(location, max_distance, difficulty_filter):
            """Run a search and return (state, stats update, cards HTML, map HTML)."""
            data = hiking_server.get_hiking_recommendations_structured(location, max_distance, difficulty_filter)

            if not data["success"]:
                # Failure path: show the message in the stats panel, clear the rest.
                stats_html = f"""
                <div style="text-align: center; padding: 20px; color: #dc3545;">
                    <h3>{data["message"]}</h3>
                </div>
                """
                map_html = """
                <div class="map-container">
                    <div style="height: 100%; display: flex; align-items: center; justify-content: center; background: #f8f9fa; color: #6c757d; font-size: 16px;">
                        π« No trails to display
                    </div>
                </div>
                """
                # Single update carries both the HTML value and the visibility flip.
                return data, gr.update(value=stats_html, visible=True), "", map_html

            # Summary stats cards.
            stats = data["stats"]
            stats_html = f"""
            <div class="stats-grid">
                <div class="stat-card">
                    <h3>{stats['total_hikes']}</h3>
                    <p>Trails Found</p>
                </div>
                <div class="stat-card">
                    <h3>{stats['avg_distance']} mi</h3>
                    <p>Avg Distance</p>
                </div>
                <div class="stat-card">
                    <h3>{stats['avg_elevation']:,} ft</h3>
                    <p>Avg Elevation</p>
                </div>
                <div class="stat-card">
                    <h3>β {stats['avg_rating']}</h3>
                    <p>Avg Rating</p>
                </div>
            </div>
            """

            # One styled card per recommended trail.
            recommendations_html = ""
            for hike in data["hikes"]:
                difficulty_class = {
                    "π’ Easy": "difficulty-easy",
                    "π‘ Moderate": "difficulty-moderate",
                    "π  Hard": "difficulty-hard",
                    "π΄ Very Hard": "difficulty-very-hard"
                }.get(hike['difficulty_level'], "difficulty-easy")

                recommendations_html += f"""
                <div class="trail-card {difficulty_class}">
                    <div style="display: flex; justify-content: space-between; align-items: flex-start; margin-bottom: 12px;">
                        <div>
                            <h3 style="margin: 0; color: #2c3e50;">{hike['name']}</h3>
                            <div style="margin: 4px 0;">
                                <span style="background: {'#28a745' if 'π’' in hike['difficulty_level'] else '#ffc107' if 'π‘' in hike['difficulty_level'] else '#fd7e14' if 'π ' in hike['difficulty_level'] else '#dc3545'}; color: white; padding: 4px 8px; border-radius: 12px; font-size: 12px; font-weight: bold;">
                                    {hike['difficulty_level']}
                                </span>
                            </div>
                        </div>
                        <div style="text-align: right;">
                            <div style="font-size: 24px; font-weight: bold; color: #667eea;">
                                {hike['difficulty_score']:.1f}<span style="font-size: 14px;">/100</span>
                            </div>
                            <div style="font-size: 12px; color: #6c757d;">AI Score</div>
                        </div>
                    </div>

                    <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(120px, 1fr)); gap: 12px; margin: 12px 0;">
                        <div style="text-align: center; padding: 8px; background: rgba(255,255,255,0.7); border-radius: 8px;">
                            <div style="font-weight: bold; color: #2c3e50;">π {hike['distance']} mi</div>
                            <div style="font-size: 12px; color: #6c757d;">Distance</div>
                        </div>
                        <div style="text-align: center; padding: 8px; background: rgba(255,255,255,0.7); border-radius: 8px;">
                            <div style="font-weight: bold; color: #2c3e50;">β°οΈ {hike['elevation_gain']:,} ft</div>
                            <div style="font-size: 12px; color: #6c757d;">Elevation</div>
                        </div>
                        <div style="text-align: center; padding: 8px; background: rgba(255,255,255,0.7); border-radius: 8px;">
                            <div style="font-weight: bold; color: #2c3e50;">β {hike['rating']:.1f}</div>
                            <div style="font-size: 12px; color: #6c757d;">{hike['reviews']} reviews</div>
                        </div>
                        <div style="text-align: center; padding: 8px; background: rgba(255,255,255,0.7); border-radius: 8px;">
                            <div style="font-weight: bold; color: #2c3e50;">π {hike['distance_from_user']} mi</div>
                            <div style="font-size: 12px; color: #6c757d;">From you</div>
                        </div>
                    </div>

                    <div style="margin: 12px 0;">
                        <div style="font-size: 14px; color: #2c3e50; margin-bottom: 4px;">
                            <strong>Features:</strong> {', '.join(hike['features'])}
                        </div>
                        <div style="font-size: 12px; color: #6c757d;">
                            π Source: {hike.get('source', 'Database')}
                        </div>
                    </div>
                </div>
                """

            # Embedded OSM map centered on the resolved user location.
            user_lat, user_lon = data["user_location"]["lat"], data["user_location"]["lon"]
            map_html = f"""
            <div class="map-container">
                <iframe
                    width="100%"
                    height="400"
                    frameborder="0"
                    scrolling="no"
                    marginheight="0"
                    marginwidth="0"
                    src="https://www.openstreetmap.org/export/embed.html?bbox={user_lon - 0.5},{user_lat - 0.3},{user_lon + 0.5},{user_lat + 0.3}&layer=mapnik&marker={user_lat},{user_lon}"
                    style="border-radius: 12px;">
                </iframe>
                <div style="text-align: center; margin-top: 8px; font-size: 12px; color: #6c757d;">
                    π Interactive map showing your location and nearby trails
                </div>
            </div>
            """

            return data, gr.update(value=stats_html, visible=True), recommendations_html, map_html

        # Each output component appears exactly once (the original listed
        # stats_display twice, which duplicate-output Gradio handlers reject).
        handler_outputs = [current_data, stats_display, recommendations_display, map_display]

        search_btn.click(
            fn=update_display,
            inputs=[location_input, distance_slider, difficulty_dropdown],
            outputs=handler_outputs
        )

        # Auto-update on parameter change.
        for component in [location_input, distance_slider, difficulty_dropdown]:
            component.change(
                fn=update_display,
                inputs=[location_input, distance_slider, difficulty_dropdown],
                outputs=handler_outputs
            )

        gr.Markdown("""
        ---
        ### π€ Advanced AI Features:
        - **Multi-Source Data Integration**: OpenStreetMap, Recreation.gov, and curated databases
        - **Machine Learning Scoring**: Custom algorithm analyzing 15+ trail factors
        - **Real-time Processing**: Instant recommendations with interactive filtering
        - **Visual Analytics**: Interactive maps, statistics, and difficulty visualization
        - **Smart Personalization**: Location-aware recommendations with distance optimization

        **Tech Stack**: Python, Gradio, NumPy, Real-time APIs, Custom ML Algorithm, Interactive UI Components
        """)

    return app
# Launch the server
if __name__ == "__main__":
    app = create_interface()
    # share=True opens a public tunnel; mcp_server=True also exposes the
    # app's functions over Gradio's MCP endpoint so MCP clients can call them.
    app.launch(
        server_name="0.0.0.0",  # listen on all interfaces (needed in containers)
        server_port=7860,       # port expected by the HF Space config
        share=True,
        show_error=True,        # surface tracebacks in the UI for debugging
        mcp_server=True
    )
|
services/place_service.py
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Place/hotel search service using Foursquare API.
|
| 3 |
+
"""
|
| 4 |
+
import requests
|
| 5 |
+
from typing import Optional, List, Dict
|
| 6 |
+
from utils.api_config import api_config, CITY_COORDINATES
|
| 7 |
+
|
| 8 |
+
class PlaceService:
    """Service for searching hotels and accommodations using the Foursquare Places API.

    Search strategy, tried in order until results are found:
      1. Coordinate search (``ll``) when the location matches a known city.
      2. Free-text ``near`` search, retrying short names with common state suffixes.
      3. Combined free-text query as a last resort.
    """

    # Abbreviations and common misspellings mapped to canonical city names.
    # Matched per whitespace-separated token: a plain substring replace would
    # corrupt other words (e.g. 'chi' in 'chicago' -> 'chicagocago',
    # 'la' in 'atlanta' -> 'atlosangelesnta').
    _LOCATION_FIXES = {
        'folom': 'folsom',
        'nyc': 'new york',
        'sf': 'san francisco',
        'la': 'los angeles',
        'chi': 'chicago',
        'philly': 'philadelphia',
        'vegas': 'las vegas'
    }

    # Seconds to wait for each Foursquare HTTP request; without a timeout a
    # hung connection would block the calling agent indefinitely.
    _REQUEST_TIMEOUT = 10

    def __init__(self):
        # API key is resolved centrally; may be empty when unconfigured.
        self.api_key = api_config.foursquare_api_key

    def _normalize_location(self, location: str) -> str:
        """Lower-case *location*, strip query prefixes, and expand known
        abbreviations/misspellings one whitespace-separated token at a time."""
        location = location.lower().replace('hotels in ', '').replace('hotel in ', '').replace('places in ', '').strip()
        tokens = [self._LOCATION_FIXES.get(tok, tok) for tok in location.split()]
        return ' '.join(tokens)

    def search_places(self, location: str, distance: Optional[float] = None) -> Dict:
        """Search for hotels and accommodations in a given location.

        Args:
            location: Free-text location; "hotels in ..." style prefixes are stripped.
            distance: Optional search radius in miles (defaults to ~3.1 mi / 5 km).

        Returns:
            Result dict from :meth:`_process_place_results`, or a dict with a
            single ``"error"`` key describing the failure.
        """
        if not self.api_key:
            return {"error": "Foursquare API key not configured. Please check server configuration."}

        # Keep the raw input for user-facing error messages.
        original_location = location
        location = self._normalize_location(location)

        # Foursquare expects the radius in meters.
        if distance is None:
            radius = 5000  # 5km default
        else:
            radius = int(distance * 1609.34)  # Convert miles to meters

        url = "https://api.foursquare.com/v3/places/search"
        headers = {
            "accept": "application/json",
            "Authorization": self.api_key
        }

        # Resolve known cities to coordinates for Strategy 1.
        ll_param = None
        for city, coords in CITY_COORDINATES.items():
            if city in location or location in city:
                ll_param = coords
                location = city  # Use the standardized city name
                break

        try:
            places = []

            # Strategy 1: coordinate-based search.
            if ll_param:
                params = {
                    "query": "hotel",
                    "ll": ll_param,
                    "radius": radius,
                    "categories": "19012",  # Foursquare "Hotel" category
                    "sort": "RATING",
                    "limit": 10
                }
                response = requests.get(url, headers=headers, params=params,
                                        timeout=self._REQUEST_TIMEOUT)
                if response.status_code == 200:
                    places = response.json().get('results', [])

            # Strategy 2: free-text "near" search; short names are retried with
            # common state suffixes to disambiguate small towns.
            if not places:
                if len(location) <= 5:
                    search_attempts = [
                        f"{location}, CA",  # California
                        f"{location}, NY",  # New York
                        f"{location}, TX",  # Texas
                        f"{location}, FL",  # Florida
                        location
                    ]
                else:
                    search_attempts = [location]

                for search_loc in search_attempts:
                    params = {
                        "query": "hotel",
                        "near": search_loc,
                        "radius": radius,
                        "categories": "19012",
                        "sort": "RATING",
                        "limit": 10
                    }
                    response = requests.get(url, headers=headers, params=params,
                                            timeout=self._REQUEST_TIMEOUT)
                    if response.status_code == 200:
                        places = response.json().get('results', [])
                        if places:
                            location = search_loc  # Remember the successful search term
                            break

            # Strategy 3: combined free-text query as a last resort.
            if not places:
                params = {
                    "query": f"hotel {location}",
                    "categories": "19012",
                    "sort": "RATING",
                    "limit": 10
                }
                response = requests.get(url, headers=headers, params=params,
                                        timeout=self._REQUEST_TIMEOUT)
                if response.status_code == 200:
                    places = response.json().get('results', [])

            # Surface the last HTTP failure (if any) as an HTTPError for the
            # handler below; on a 2xx this is a no-op.
            response.raise_for_status()

            if not places:
                return {
                    "error": f"No accommodations found in '{original_location}'. "
                             f"Try using a more specific location like 'City, State' "
                             f"(e.g., 'Folsom, CA' or 'New York, NY')."
                }

            return self._process_place_results(location, places, distance)

        except requests.exceptions.HTTPError as e:
            # 400 almost always means Foursquare could not geocode the location.
            if e.response.status_code == 400:
                return {
                    "error": f"Invalid location '{original_location}'. "
                             f"Please use a more specific format like 'City, State' "
                             f"(e.g., 'Folsom, CA', 'Seattle, WA', 'New York, NY')."
                }
            else:
                return {"error": f"API error ({e.response.status_code}): {str(e)}"}
        except Exception as e:
            # Best-effort: network/JSON failures become a friendly error dict.
            return {"error": f"Failed to search places: {str(e)}"}

    def _process_place_results(self, location: str, places: List[Dict], distance: Optional[float] = None) -> Dict:
        """Format raw Foursquare place results into the response schema.

        Args:
            location: Normalized location echoed back to the caller.
            places: Raw Foursquare result dicts.
            distance: Radius in miles used for the search (None = 5km default).

        Returns:
            Dict with ``location``, ``total_found``, ``search_radius`` and up
            to five entries in ``top_places``.
        """
        display_count = min(5, len(places))

        result = {
            "location": location,
            "total_found": len(places),
            "search_radius": f"{distance} miles" if distance else "3.1 miles (5km)",
            "top_places": []
        }

        for place in places[:display_count]:
            location_data = place.get('location', {})

            # Assemble "street, city, state" from whichever parts exist.
            address_parts = [location_data[key]
                             for key in ('address', 'locality', 'region')
                             if location_data.get(key)]
            formatted_address = ", ".join(address_parts) if address_parts else 'Address not available'

            place_name = place.get('name', 'Unnamed Location')

            # Foursquare ratings are on a 0-10 scale.
            rating = place.get('rating')
            rating_display = f"{rating}/10" if rating else 'Not rated'

            # First category name doubles as the place type.
            categories = place.get('categories', [])
            if categories:
                place_type = categories[0].get('name', 'Hotel & Lodging')
            else:
                place_type = 'Hotel & Lodging'

            place_info = {
                "name": place_name,
                "type": place_type,
                "address": formatted_address,
                "distance": f"{place.get('distance', 0)}m from center",
                "rating": rating_display,
                "description": place.get('description', f"Quality {place_type.lower()} with excellent amenities")
            }
            result['top_places'].append(place_info)

        return result
# Global service instance
# Shared, module-level singleton; importers should use `place_service`
# rather than constructing a new PlaceService per request.
place_service = PlaceService()
|
services/restaurant_service.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Restaurant search service using Foursquare API with intelligent dish recommendations.
|
| 3 |
+
"""
|
| 4 |
+
import requests
|
| 5 |
+
from typing import Optional, List, Dict
|
| 6 |
+
from utils.api_config import api_config, CITY_COORDINATES
|
| 7 |
+
|
| 8 |
+
class DishRecommendationEngine:
    """Intelligent dish recommendation engine based on cuisine types."""

    # Ordered (keywords, dishes) rules: the first rule whose keyword appears
    # as a substring of the lower-cased cuisine name wins. Order is
    # significant -- e.g. "steakhouse" must hit the American rule before the
    # generic "tea" rule would match it.
    _CUISINE_RULES = [
        (('thai',), ['Pad Thai', 'Green Curry', 'Tom Yum Soup', 'Mango Sticky Rice', 'Massaman Curry']),
        (('italian', 'pizza'), ['Margherita Pizza', 'Pasta Carbonara', 'Tiramisu', 'Caesar Salad', 'Risotto']),
        (('chinese',), ["General Tso's Chicken", 'Fried Rice', 'Dumplings', 'Sweet & Sour Pork', 'Lo Mein']),
        (('japanese', 'sushi'), ['California Roll', 'Salmon Sashimi', 'Chicken Teriyaki', 'Miso Soup', 'Tempura']),
        (('mexican',), ['Fish Tacos', 'Guacamole', 'Quesadillas', 'Burrito Bowl', 'Churros']),
        (('indian',), ['Butter Chicken', 'Biryani', 'Naan Bread', 'Tikka Masala', 'Samosas']),
        (('french',), ['Croissants', 'French Onion Soup', 'Coq au Vin', 'Crème Brûlée', 'Escargot']),
        (('american', 'burger', 'grill', 'bbq', 'steakhouse'), ['Classic Burger', 'BBQ Ribs', 'Mac & Cheese', 'Grilled Salmon', 'Apple Pie']),
        (('bubble tea', 'boba', 'tea'), ['Taro Bubble Tea', 'Brown Sugar Milk Tea', 'Fruit Teas', 'Popcorn Chicken', 'Tapioca Pearls']),
        (('seafood',), ['Fish & Chips', 'Clam Chowder', 'Grilled Salmon', 'Crab Cakes', 'Shrimp Scampi']),
        (('mediterranean', 'greek'), ['Hummus', 'Gyros', 'Greek Salad', 'Baklava', 'Falafel']),
        (('vietnamese',), ['Pho', 'Banh Mi', 'Spring Rolls', 'Vietnamese Coffee', 'Vermicelli Bowl']),
        (('korean',), ['Kimchi', 'Bulgogi', 'Bibimbap', 'Korean BBQ', 'Hotpot']),
    ]

    @staticmethod
    def get_popular_dishes_by_cuisine(cuisine_type: str, restaurant_name: str) -> List[str]:
        """Generate popular dishes based on cuisine type and restaurant name."""
        cuisine_lower = cuisine_type.lower()
        name_lower = restaurant_name.lower()

        # Coffee shops and cafes get brand-aware suggestions, so they are
        # handled before the generic cuisine rules.
        if any(word in cuisine_lower for word in ('coffee', 'cafe', 'bakery')):
            if 'peet' in name_lower:
                return ["Major Dickason's Blend Coffee", 'Espresso Drinks', 'Fresh Pastries', 'Cold Brew']
            if 'starbucks' in name_lower:
                return ['Pike Place Roast', 'Pumpkin Spice Latte', 'Frappuccino', 'Cake Pops']
            return ['Signature Coffee Blends', 'Fresh Pastries', 'Sandwiches', 'Seasonal Drinks']

        # First matching cuisine rule wins; return a copy so callers may
        # mutate the list without affecting the shared rule table.
        for keywords, dishes in DishRecommendationEngine._CUISINE_RULES:
            if any(word in cuisine_lower for word in keywords):
                return list(dishes)

        # Default recommendations when no cuisine keyword matched.
        return ["Chef's Special", 'House Signature Dish', 'Seasonal Menu Items', 'Daily Specials']
| 81 |
+
class RestaurantService:
    """Service for searching restaurants using the Foursquare Places API.

    Results are ranked by rating and enriched with cuisine-based dish
    suggestions from :class:`DishRecommendationEngine`.
    """

    # Seconds to wait for each Foursquare HTTP request; without a timeout a
    # hung connection would block the calling agent indefinitely.
    _REQUEST_TIMEOUT = 10

    def __init__(self):
        self.dish_engine = DishRecommendationEngine()
        # API key is resolved centrally; may be empty when unconfigured.
        self.api_key = api_config.foursquare_api_key

    def search_restaurants(self, location: str, distance: Optional[float] = None) -> Dict:
        """Search for restaurants in a given location.

        Args:
            location: Free-text location; a leading "restaurants in " is stripped.
            distance: Search radius in miles (defaults to 1 mile).

        Returns:
            Result dict from :meth:`_process_restaurant_results`, or a dict
            with a single ``"error"`` key on failure.
        """
        if not self.api_key:
            return {"error": "Foursquare API key not configured. Please check server configuration."}

        # Clean location input
        location = location.lower().replace('restaurants in ', '').strip()
        if distance is None:
            distance = 1.0

        # Convert miles to meters for API
        radius = int(distance * 1609.34)

        url = "https://api.foursquare.com/v3/places/search"
        headers = {
            "accept": "application/json",
            "Authorization": self.api_key
        }

        # Prefer coordinate search when the location matches a known city.
        ll_param = None
        for city, coords in CITY_COORDINATES.items():
            if city in location:
                ll_param = coords
                break

        # Primary search strategy
        if ll_param:
            params = {
                "query": "restaurant",
                "ll": ll_param,
                "radius": radius,
                "categories": "13065",  # Foursquare "Restaurant" category
                "sort": "RATING",  # Sort by rating for better results
                "limit": 5
            }
        else:
            # Fallback: combined free-text query approach
            params = {
                "query": f"restaurant {location}",
                "radius": radius,
                "categories": "13065",
                "sort": "RATING",
                "limit": 5
            }

        try:
            response = requests.get(url, headers=headers, params=params,
                                    timeout=self._REQUEST_TIMEOUT)
            response.raise_for_status()
            restaurants = response.json().get('results', [])

            # Retry with a "near" search using the city part of "City, State"
            # when the free-text query came back empty.
            if not restaurants and not ll_param:
                location_parts = location.split(',')
                if len(location_parts) > 1:
                    params = {
                        "query": "restaurant",
                        "near": location_parts[0].strip(),
                        "radius": radius,
                        "categories": "13065",
                        "sort": "RATING",
                        "limit": 5
                    }
                    response = requests.get(url, headers=headers, params=params,
                                            timeout=self._REQUEST_TIMEOUT)
                    response.raise_for_status()
                    restaurants = response.json().get('results', [])

            if not restaurants:
                return {"error": f"No restaurants found in {location}. Try a different location or increase the search radius."}

            return self._process_restaurant_results(location, distance, restaurants)

        except Exception as e:
            # Best-effort: network/HTTP/JSON failures become a friendly error dict.
            return {"error": f"Failed to search restaurants: {str(e)}"}

    def _process_restaurant_results(self, location: str, distance: float, restaurants: List[Dict]) -> Dict:
        """Format raw Foursquare results into the response schema (top 3).

        Args:
            location: Normalized location echoed back to the caller.
            distance: Radius in miles used for the search.
            restaurants: Raw Foursquare result dicts.

        Returns:
            Dict with ``location``, ``distance``, ``total_found`` and up to
            three entries in ``top_restaurants``.
        """
        result = {
            "location": location,
            "distance": f"{distance} miles",
            "total_found": len(restaurants),
            "top_restaurants": []
        }

        for restaurant in restaurants[:3]:  # Limit to top 3
            location_data = restaurant.get('location', {})

            # Assemble "street, city, state" from whichever parts exist.
            address_parts = [location_data[key]
                             for key in ('address', 'locality', 'region')
                             if location_data.get(key)]
            formatted_address = ", ".join(address_parts) if address_parts else 'Address not available'

            # First category name doubles as the cuisine type.
            categories = restaurant.get('categories', [])
            cuisine_type = categories[0].get('name', 'Restaurant') if categories else 'Restaurant'

            restaurant_name = restaurant.get('name', 'Unnamed Restaurant')

            # Cuisine-aware dish suggestions.
            popular_dishes = self.dish_engine.get_popular_dishes_by_cuisine(cuisine_type, restaurant_name)

            # Foursquare ratings are on a 0-10 scale.
            rating = restaurant.get('rating')
            rating_display = f"{rating}/10" if rating else 'Not rated'

            # Foursquare price tier 1-4 -> "$".."$$$$" (default "$").
            price_level = restaurant.get('price', 0)
            price_display = "$" * max(1, price_level) if price_level else "$"

            restaurant_info = {
                "name": restaurant_name,
                "cuisine_type": cuisine_type,
                "address": formatted_address,
                "distance": f"{restaurant.get('distance', 0)}m from center",
                "rating": rating_display,
                "price": price_display,
                "description": restaurant.get('description', f"Popular {cuisine_type.lower()} spot with great reviews"),
                "recommended_dishes": popular_dishes
            }
            result['top_restaurants'].append(restaurant_info)

        return result
| 217 |
+
|
| 218 |
+
# Global service instance
# Shared, module-level singleton; importers should use `restaurant_service`
# rather than constructing a new RestaurantService per request.
restaurant_service = RestaurantService()
|
utils/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Utils package for shared utilities and configurations
|
utils/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (175 Bytes). View file
|
|
|
utils/__pycache__/api_config.cpython-312.pyc
ADDED
|
Binary file (4.96 kB). View file
|
|
|
utils/__pycache__/formatting.cpython-312.pyc
ADDED
|
Binary file (8.62 kB). View file
|
|
|
utils/api_config.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Central API configuration and environment setup.
|
| 3 |
+
"""
|
| 4 |
+
import os
|
| 5 |
+
import requests
|
| 6 |
+
from typing import Dict, Tuple
|
| 7 |
+
|
| 8 |
+
class APIConfig:
    """Centralized API configuration and validation.

    Credentials are read from environment variables only. The previous
    revision embedded a fallback Foursquare key directly in source -- a
    leaked secret; it has been removed (rotate that key if it was ever
    live). An empty key simply means "not configured" and the service
    modules already report a friendly error in that case.
    """

    def __init__(self):
        # Empty string (Foursquare) / None (Anthropic) mean "not configured".
        self.foursquare_api_key = os.environ.get('FOURSQUARE_API_KEY', '')
        self.anthropic_api_key = os.environ.get('ANTHROPIC_API_KEY')

        # MCP Server configuration
        self.mcp_server_url = "https://srikanthnagelli-mcp-server.hf.space/gradio_api/mcp/sse"

    def validate_apis(self) -> Dict[str, bool]:
        """Validate API keys and connectivity.

        Returns:
            Mapping of service name ("foursquare", "anthropic") to a bool
            indicating whether the service is usable.
        """
        results = {}

        # Test Foursquare API with a minimal live request.
        if self.foursquare_api_key:
            try:
                url = "https://api.foursquare.com/v3/places/search"
                headers = {
                    "accept": "application/json",
                    "Authorization": self.foursquare_api_key
                }
                params = {"query": "test", "limit": 1}
                response = requests.get(url, headers=headers, params=params, timeout=5)
                results['foursquare'] = response.status_code == 200
            except Exception:
                # Network failure or invalid key: treat as unavailable.
                results['foursquare'] = False
        else:
            results['foursquare'] = False

        # Anthropic: only check that a key is present (no live call).
        results['anthropic'] = bool(self.anthropic_api_key)

        return results

    def print_status(self):
        """Print API configuration status to stdout (startup diagnostics)."""
        print("\n🔧 API Configuration Status:")

        validation = self.validate_apis()

        if validation['foursquare']:
            print("✅ Foursquare API: Connected and working")
        else:
            print("⚠️ Foursquare API: Connection issues or invalid key")

        if validation['anthropic']:
            print("✅ Anthropic API Key: Configured")
        else:
            print("⚠️ Anthropic API Key: Not configured")
            print("ℹ️ Set environment variable: ANTHROPIC_API_KEY=your_key")
| 63 |
+
|
| 64 |
+
# Global configuration instance
# Import `api_config` everywhere instead of instantiating APIConfig again.
api_config = APIConfig()
|
| 66 |
+
|
| 67 |
+
# Location coordinates for major cities
# Maps lower-cased city/area name -> "lat,lng" string for the Foursquare
# `ll` parameter. NOTE: the place/restaurant services scan this dict in
# order and stop at the first substring match, so insertion order is
# significant -- do not re-sort alphabetically.
CITY_COORDINATES = {
    'seattle': '47.6062,-122.3321',
    'pike place': '47.6089,-122.3401',
    'downtown seattle': '47.6085,-122.3351',
    'new york': '40.7128,-74.0060',
    'san francisco': '37.7749,-122.4194',
    'los angeles': '34.0522,-118.2437',
    'chicago': '41.8781,-87.6298',
    'boston': '42.3601,-71.0589',
    'portland': '45.5155,-122.6789',
    'denver': '39.7392,-104.9903',
    'miami': '25.7617,-80.1918',
    'las vegas': '36.1699,-115.1398',
    'phoenix': '33.4484,-112.0740',
    'philadelphia': '39.9526,-75.1652',
    'houston': '29.7604,-95.3698',
    'dallas': '32.7767,-96.7970',
    'atlanta': '33.7490,-84.3880',
    'detroit': '42.3314,-83.0458',
    'orlando': '28.5383,-81.3792',
    'san diego': '32.7157,-117.1611',
    # Additional California cities
    'folsom': '38.6780,-121.1760',
    'sacramento': '38.5816,-121.4944',
    'san jose': '37.3382,-121.8863',
    'oakland': '37.8044,-122.2711',
    'fresno': '36.7378,-119.7871',
    'long beach': '33.7701,-118.1937',
    'anaheim': '33.8366,-117.9143',
    'riverside': '33.9533,-117.3962',
    'stockton': '37.9577,-121.2908',
    'bakersfield': '35.3733,-119.0187',
    # Additional major cities
    'austin': '30.2672,-97.7431',
    'nashville': '36.1627,-86.7816',
    'memphis': '35.1495,-90.0490',
    'milwaukee': '43.0389,-87.9065',
    'kansas city': '39.0997,-94.5786',
    'colorado springs': '38.8339,-104.8214',
    'virginia beach': '36.8529,-75.9780',
    'indianapolis': '39.7684,-86.1581',
    'charlotte': '35.2271,-80.8431',
    'jacksonville': '30.3322,-81.6557',
    'columbus': '39.9612,-82.9988',
    'fort worth': '32.7555,-97.3308',
    'san antonio': '29.4241,-98.4936',
    'el paso': '31.7619,-106.4850'
}
|
utils/formatting.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Shared formatting utilities for consistent output across all tools.
|
| 3 |
+
"""
|
| 4 |
+
from typing import List, Dict, Any
|
| 5 |
+
|
| 6 |
+
class ResponseFormatter:
    """Handles consistent formatting of responses with emojis and structure.

    Every formatter returns a Markdown string ready for display in the
    chat UI. Inputs are the already-processed result dicts produced by the
    corresponding service modules, so all formatters are pure functions
    with no I/O.
    """

    @staticmethod
    def format_restaurant_response(location: str, distance: float, restaurants: List[Dict]) -> str:
        """Format restaurant search results.

        Args:
            location: Searched location (title-cased for display).
            distance: Search radius in miles.
            restaurants: Dicts from RestaurantService with keys: name,
                cuisine_type, address, distance, rating, price,
                recommended_dishes, description.
        """
        output = f"🍽️ **Top Restaurants in {location.title()}** 🍽️\n"
        output += f"📍 *Within {distance} miles*\n\n"
        output += f"🎯 Found {len(restaurants)} amazing restaurants! Here are the top picks:\n\n"

        for i, restaurant in enumerate(restaurants, 1):
            output += f"**#{i} {restaurant['name']}** ⭐\n"
            output += f"🍴 **Cuisine:** {restaurant['cuisine_type']}\n"
            output += f"📍 **Address:** {restaurant['address']}\n"
            output += f"🚶 **Distance:** {restaurant['distance']}\n"

            # Optional fields: only shown when the service supplied real data.
            if restaurant['rating'] != 'Not rated':
                output += f"⭐ **Rating:** {restaurant['rating']}\n"

            if restaurant['price']:
                output += f"💰 **Price:** {restaurant['price']}\n"

            output += f"👨‍🍳 **Recommended Dishes:**\n"
            for dish in restaurant['recommended_dishes']:
                output += f"  • {dish}\n"

            if restaurant['description'] != 'No description available':
                # Truncate long descriptions to keep each card compact.
                output += f"ℹ️ **About:** {restaurant['description'][:150]}...\n"

            output += "\n" + "─" * 50 + "\n\n"

        output += "🌟 *Enjoy your dining experience!* 🌟"
        return output

    @staticmethod
    def format_place_response(location: str, places: List[Dict]) -> str:
        """Format place/hotel search results.

        Args:
            location: Searched location (title-cased for display).
            places: Dicts from PlaceService with keys: name, type, address,
                distance, rating, description.
        """
        output = f"🏨 **Places to Stay in {location.title()}** 🏨\n\n"
        output += f"🎯 Found {len(places)} amazing places! Here are the top suggestions:\n\n"

        for i, place in enumerate(places, 1):
            output += f"**#{i} {place['name']}** ⭐\n"
            output += f"🏨 **Type:** {place['type']}\n"
            output += f"📍 **Address:** {place['address']}\n"
            output += f"🚶 **Distance:** {place['distance']}\n"

            if place['rating'] != 'Not rated':
                output += f"⭐ **Rating:** {place['rating']}\n"

            if place['description'] != 'No description available':
                output += f"ℹ️ **About:** {place['description'][:150]}...\n"

            output += "\n" + "─" * 50 + "\n\n"

        output += "🌟 *Have a wonderful stay!* 🌟"
        return output

    @staticmethod
    def format_hiking_response(location: str, max_distance: int, difficulty: str,
                               hikes: List[Dict], stats: Dict) -> str:
        """Format hiking trail search results.

        Args:
            location: Searched location (title-cased for display).
            max_distance: Search radius in miles.
            difficulty: Requested difficulty filter label.
            hikes: Trail dicts (name, difficulty_level, difficulty_score,
                distance, elevation_gain, rating, reviews,
                distance_from_user, features, source).
            stats: Aggregates (total_hikes, avg_distance, avg_elevation,
                avg_rating, difficulty_distribution).
        """
        output = f"🏔️ **Hiking Trails near {location.title()}** 🏔️\n"
        output += f"📍 *Within {max_distance} miles, Difficulty: {difficulty}*\n\n"

        # Add statistics
        output += "📊 **Quick Stats:**\n"
        output += f"• 🥾 **Total Trails Found:** {stats['total_hikes']}\n"
        output += f"• 📏 **Average Distance:** {stats['avg_distance']} miles\n"
        output += f"• ⛰️ **Average Elevation Gain:** {stats['avg_elevation']:,} ft\n"
        output += f"• ⭐ **Average Rating:** {stats['avg_rating']}\n\n"

        # Add difficulty distribution
        output += "🎯 **Difficulty Distribution:**\n"
        for diff, count in stats['difficulty_distribution'].items():
            output += f"• {diff}: {count} trails\n"
        output += "\n"

        # Add trail details
        output += "🥾 **Top Recommended Trails:**\n\n"
        for i, hike in enumerate(hikes[:5], 1):  # Limit to top 5
            output += f"**#{i} {hike['name']}** 🏔️\n"
            output += f"🎯 **Difficulty:** {hike['difficulty_level']} (Score: {hike['difficulty_score']:.1f}/100)\n"
            output += f"📏 **Distance:** {hike['distance']} miles\n"
            output += f"⛰️ **Elevation Gain:** {hike['elevation_gain']:,} ft\n"
            output += f"⭐ **Rating:** {hike['rating']} ({hike['reviews']} reviews)\n"
            output += f"📍 **Distance from you:** {hike['distance_from_user']} miles\n"
            output += f"🌟 **Features:** {', '.join(hike['features'])}\n"
            output += f"📊 **Source:** {hike.get('source', 'Database')}\n"
            output += "\n" + "─" * 50 + "\n\n"

        output += "🌲 *Happy hiking and stay safe on the trails!* 🌲"
        return output

    @staticmethod
    def format_sentiment_response(text: str, polarity: float = 0.5,
                                  subjectivity: float = 0.5, assessment: str = "neutral") -> str:
        """Format sentiment analysis results.

        Args:
            text: The analyzed text, echoed back verbatim.
            polarity: Signed sentiment score; >0.1 positive, <-0.1 negative.
            subjectivity: 0 (objective) to 1 (subjective).
            assessment: Overall label, title-cased for display.
        """
        output = f"📊 **Sentiment Analysis Result** 📊\n\n"
        output += f"📝 **Text Analyzed:** {text}\n\n"
        output += f"📊 **Analysis:**\n"
        output += f"• 📈 **Polarity:** {polarity} ({'Positive' if polarity > 0.1 else 'Negative' if polarity < -0.1 else 'Neutral'})\n"
        output += f"• 🤔 **Subjectivity:** {subjectivity} ({'Subjective' if subjectivity > 0.5 else 'Objective'})\n"
        output += f"• 📊 **Overall Assessment:** {assessment.title()} sentiment\n\n"

        # A wider band (+/-0.3) is used for the summary sentence than for the
        # Positive/Negative label above, so mild sentiment reads as balanced.
        if polarity > 0.3:
            output += f"💡 *This text expresses positive emotions and favorable opinions.*"
        elif polarity < -0.3:
            output += f"💡 *This text expresses negative emotions and unfavorable opinions.*"
        else:
            output += f"💡 *This text shows a balanced emotional tone with neutral sentiment.*"

        return output

    @staticmethod
    def format_error(error_message: str) -> str:
        """Format error messages consistently."""
        return f"❌ **Error:** {error_message}"

    @staticmethod
    def format_no_results(service_type: str, location: str = "") -> str:
        """Format "no results" messages with a service-appropriate emoji.

        Args:
            service_type: Category searched (e.g. "restaurants", "trails").
            location: Optional location to mention in the message.
        """
        emoji_map = {
            "restaurants": "🍽️",
            "places": "🏨",
            "hotels": "🏨",
            "trails": "🏔️",
            "hikes": "🏔️"
        }

        emoji = emoji_map.get(service_type.lower(), "🔍")
        location_text = f" in {location}" if location else ""

        return f"{emoji} **No {service_type} found{location_text}.** Try expanding your search radius or using a different location."
|