Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- .env.name +15 -0
- .gitignore +4 -0
- .hfignore +5 -0
- Dockerfile +20 -0
- README.md +12 -0
- agent/__init__.py +0 -0
- agent/agentic_workflow.py +83 -0
- agent/agentic_workflownew.py +138 -0
- app/Dockerfile +19 -0
- app/README.md +20 -0
- app/requirements.txt +4 -0
- app/streamlit_app.py +83 -0
- config/__init__.py +0 -0
- config/config.yaml +12 -0
- exception/__init__.py +0 -0
- exception/exception_handling.py +0 -0
- logger/__init__.py +0 -0
- logger/decorators.py +13 -0
- main.py +49 -0
- mainnew.py +50 -0
- my_graph.png +0 -0
- notebook/experiments.ipynb +0 -0
- prompt_library/__init__.py +0 -0
- prompt_library/prompt.py +56 -0
- pyproject.toml +7 -0
- requirements-local.txt +20 -0
- requirements.txt +18 -0
- setup.py +37 -0
- tools/__init__.py +0 -0
- tools/arthmatic_op_tool.py +66 -0
- tools/currency_conversion_tool.py +53 -0
- tools/expense_calculator_tool.py +76 -0
- tools/place_search_tool.py +118 -0
- tools/weather_info_tool.py +77 -0
- utils/__init__.py +0 -0
- utils/config_loader.py +8 -0
- utils/currency_converter.py +16 -0
- utils/expense_calculator.py +43 -0
- utils/model_loader.py +67 -0
- utils/place_info_search.py +78 -0
- utils/save_to_document.py +40 -0
- utils/weather_info.py +34 -0
.env.name
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
OPEN_API_KEY=""
|
| 2 |
+
GROQ_API_KEY=""
|
| 3 |
+
GOOGLE_API_KEY=""
|
| 4 |
+
GPLACES_API_KEY="" # https://console.cloud.google.com/ - create a project, enable google, and then generate the key
|
| 5 |
+
FOURSQUARE_API_KEY=""
|
| 6 |
+
TAVILAY_API_KEY="" # https://app.tavily.com/home - my Github login - It scrapes internet webpages.
|
| 7 |
+
OPENWEATHER_API_KEY=""
|
| 8 |
+
EXCHANGE_RATE_API_KEY="" # https://www.exchangerate-api.com/ - 2-week validity on my yahooid/brosnia
|
| 9 |
+
ALPHAVANTAGE_API_KEY="" # https://www.alphavantage.co/ - Free exchange rate for currency conversion. Also you can get current news or real-time stock data
|
| 10 |
+
LANGSMITH_TRACING=true
|
| 11 |
+
LANGSMITH_ENDPOINT="https://api.smith.langchain.com" # https://smith.langchain.com/ - login using google login
|
| 12 |
+
LANGSMITH_API_KEY=""
|
| 13 |
+
LANGSMITH_PROJECT="ai-trip-planner"
|
| 14 |
+
|
| 15 |
+
##My Org: kube9t.com / Email: bibhup_mishra@yahoo.com or bm80177@gmail.com
|
.gitignore
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_venv
|
| 2 |
+
.env
|
| 3 |
+
ai_trip_planner.egg-info
|
| 4 |
+
**/__pycache__
|
.hfignore
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.bin
|
| 2 |
+
*.sqlite3
|
| 3 |
+
*.pdf
|
| 4 |
+
*.png
|
| 5 |
+
*.jpg
|
Dockerfile
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use a lightweight Python image
|
| 2 |
+
FROM python:3.10-slim
|
| 3 |
+
|
| 4 |
+
# Set working directory inside the container
|
| 5 |
+
WORKDIR /app
|
| 6 |
+
|
| 7 |
+
# Copy only requirements first for better caching
|
| 8 |
+
COPY requirements.txt .
|
| 9 |
+
|
| 10 |
+
# Install dependencies
|
| 11 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 12 |
+
|
| 13 |
+
# Copy the rest of the application code
|
| 14 |
+
COPY . .
|
| 15 |
+
|
| 16 |
+
# Expose the HF Spaces default port
|
| 17 |
+
EXPOSE 7860
|
| 18 |
+
|
| 19 |
+
# Run Uvicorn on port 7860 (HF Spaces only exposes 7860)
|
| 20 |
+
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: AI Trip Planner API
|
| 3 |
+
emoji: 🌍
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: green
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# AI Trip Planner API
|
| 11 |
+
|
| 12 |
+
Docker-based API for AI-powered trip planning
|
agent/__init__.py
ADDED
|
File without changes
|
agent/agentic_workflow.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from utils.model_loader import ModelLoader
|
| 3 |
+
from prompt_library.prompt import SYSTEM_PROMPT
|
| 4 |
+
from langgraph.graph import StateGraph, MessagesState, END, START
|
| 5 |
+
from langgraph.graph.message import add_messages # Is reducer function that just aggregrates the messages.
|
| 6 |
+
from langgraph.prebuilt import ToolNode, tools_condition
|
| 7 |
+
from tools.weather_info_tool import WeatherInfoTool
|
| 8 |
+
from tools.place_search_tool import PlaceSearchTool
|
| 9 |
+
from tools.expense_calculator_tool import CalculatorTool
|
| 10 |
+
from tools.currency_conversion_tool import CurrencyConverterTool
|
| 11 |
+
from typing import TypedDict, Sequence, Optional, Annotated
|
| 12 |
+
from langchain_core.messages import BaseMessage, HumanMessage
|
| 13 |
+
from datetime import datetime
|
| 14 |
+
|
| 15 |
+
class AgentState(TypedDict):
    """Graph state schema for the agentic workflow.

    ``messages`` is the running conversation as ``BaseMessage`` objects.
    The ``add_messages`` reducer attached through ``Annotated`` tells
    LangGraph to append newly produced messages to the existing sequence
    instead of replacing it.
    """

    messages: Annotated[Sequence[BaseMessage], add_messages]
|
| 20 |
+
|
| 21 |
+
class GraphBuilder:
    """Builds a LangGraph ReAct-style agent graph for the trip planner.

    The compiled graph alternates between an ``agent`` node (LLM bound to
    the travel tools) and a ``tools`` node until the LLM stops emitting
    tool calls.
    """

    def __init__(self, model_provider: str = "openai"):
        """Load the LLM for *model_provider* and bind all travel tools to it."""
        self.model_loader = ModelLoader(model_provider=model_provider)
        self.llm = self.model_loader.load_llm()

        # Instantiate each tool wrapper and collect every tool it exposes.
        self.weather_tools = WeatherInfoTool()
        self.place_search_tools = PlaceSearchTool()
        self.calculator_tools = CalculatorTool()
        self.currency_converter_tools = CurrencyConverterTool()

        self.tools = [
            *self.weather_tools.weather_tool_list,
            *self.place_search_tools.place_search_tool_list,
            *self.calculator_tools.calculator_tool_list,
            *self.currency_converter_tools.currency_converter_tool_list,
        ]

        # LLM instance capable of emitting tool calls for the tools above.
        self.llm_with_tools = self.llm.bind_tools(tools=self.tools)

        self.graph = None  # populated by build_graph()
        self.system_prompt = SYSTEM_PROMPT

    def agent_function(self, state: "MessagesState"):
        """Agent node: prepend the system prompt and invoke the tool-bound LLM."""
        user_question = state["messages"]
        input_question = [self.system_prompt] + list(user_question)
        response = self.llm_with_tools.invoke(input_question)
        return {"messages": [response]}

    def should_continue(self, state: "AgentState") -> str:
        """Route to the tools node while the last message carries tool calls.

        Returns the edge label ``"continue"`` (go to tools) or ``"end"``.
        Bug fix: the return annotation was ``AgentState``, but the method
        returns an edge-label string.
        """
        last_message = state["messages"][-1]
        return "end" if not last_message.tool_calls else "continue"

    def build_graph(self):
        """Compile and return the agent/tools state graph."""
        graph_builder = StateGraph(MessagesState)
        graph_builder.add_node("agent", self.agent_function)
        graph_builder.add_node("tools", ToolNode(tools=self.tools))
        graph_builder.add_edge(START, "agent")
        graph_builder.add_conditional_edges(
            source="agent",
            path=self.should_continue,
            path_map={
                # Edge label -> destination node
                "end": END,
                "continue": "tools",
            },
        )
        graph_builder.add_edge("tools", "agent")
        self.graph = graph_builder.compile()
        return self.graph

    def __call__(self):
        """Convenience: calling the builder compiles and returns the graph."""
        return self.build_graph()
|
agent/agentic_workflownew.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import asyncio
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from typing import TypedDict, Sequence, Optional, Annotated, List
|
| 5 |
+
|
| 6 |
+
from langchain_core.messages import BaseMessage, HumanMessage, AIMessageChunk
|
| 7 |
+
from langchain_core.runnables import RunnableConfig
|
| 8 |
+
|
| 9 |
+
from langgraph.graph import StateGraph, MessagesState, END, START
|
| 10 |
+
from langgraph.graph.message import add_messages
|
| 11 |
+
from langgraph.prebuilt import ToolNode, tools_condition
|
| 12 |
+
|
| 13 |
+
from utils.model_loader import ModelLoader
|
| 14 |
+
from prompt_library.prompt import SYSTEM_PROMPT
|
| 15 |
+
|
| 16 |
+
from tools.weather_info_tool import WeatherInfoTool
|
| 17 |
+
from tools.place_search_tool import PlaceSearchTool
|
| 18 |
+
from tools.expense_calculator_tool import CalculatorTool
|
| 19 |
+
from tools.currency_conversion_tool import CurrencyConverterTool
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class AgentState(TypedDict):
    """Graph state for the streaming workflow: the message history,
    appended to (not replaced) via the ``add_messages`` reducer."""

    messages: Annotated[Sequence[BaseMessage], add_messages]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class GraphBuilder:
    """Streaming variant of the trip-planner agent graph builder.

    Same topology as ``agentic_workflow.GraphBuilder`` but the agent node
    streams the LLM response chunk-by-chunk, echoing progress to stdout.
    """

    def __init__(self, model_provider: str = "openai"):
        """Load the LLM for *model_provider* and bind all travel tools."""
        self.model_loader = ModelLoader(model_provider=model_provider)
        self.llm = self.model_loader.load_llm()

        # Load tool wrappers
        self.weather_tools = WeatherInfoTool()
        self.place_search_tools = PlaceSearchTool()
        self.calculator_tools = CalculatorTool()
        self.currency_converter_tools = CurrencyConverterTool()

        # Register every tool the wrappers expose
        self.tools = [
            *self.weather_tools.weather_tool_list,
            *self.place_search_tools.place_search_tool_list,
            *self.calculator_tools.calculator_tool_list,
            *self.currency_converter_tools.currency_converter_tool_list,
        ]

        # Bind LLM with tools so it can emit tool calls
        self.llm_with_tools = self.llm.bind_tools(tools=self.tools)

        self.graph = None  # populated by build_graph()
        self.system_prompt = SYSTEM_PROMPT

    async def async_agent_function(self, state: "MessagesState"):
        """Agent node: stream the LLM response, echoing chunks to stdout.

        Bug fix: the original returned only the *final* chunk of the stream
        (and raised ``NameError`` when no chunks arrived). Chunks are now
        merged into a single message so content and tool calls survive.
        """
        input_question = [self.system_prompt] + list(state["messages"])

        merged = None
        async for chunk in self.llm_with_tools.astream(input_question):
            if isinstance(chunk, AIMessageChunk):
                print(f"[LLM] {chunk.content or ''}", end="", flush=True)
            # AIMessageChunk supports '+' to accumulate partial messages.
            merged = chunk if merged is None else merged + chunk
        print()  # newline after full message

        return {"messages": [merged] if merged is not None else []}

    def should_continue(self, state: "AgentState") -> str:
        """Return "continue" (route to tools) while the last message has
        tool calls, otherwise "end"."""
        last_message = state["messages"][-1]
        return "end" if not last_message.tool_calls else "continue"

    def build_graph(self):
        """Compile the agent/tools loop into a runnable graph."""
        graph_builder = StateGraph(MessagesState)

        # Nodes
        graph_builder.add_node("agent", self.async_agent_function)
        graph_builder.add_node("tools", ToolNode(tools=self.tools))

        # Transitions
        graph_builder.add_edge(START, "agent")
        graph_builder.add_conditional_edges(
            source="agent",
            path=self.should_continue,
            path_map={
                "end": END,
                "continue": "tools",
            },
        )
        graph_builder.add_edge("tools", "agent")

        self.graph = graph_builder.compile()
        return self.graph

    async def stream(self, user_input: str):
        """Stream graph execution end-to-end, printing each node's messages."""
        if self.graph is None:
            self.build_graph()

        print(f"[User] {user_input}\n")
        async for event in self.graph.astream(
            {"messages": [HumanMessage(content=user_input)]},
            config=RunnableConfig(),
        ):
            # Bug fix: astream yields {node_name: update} dicts by default,
            # so the update must be unpacked per node — the original checked
            # "messages" on the outer dict and never matched.
            for node_update in event.values():
                if isinstance(node_update, dict) and "messages" in node_update:
                    for msg in node_update["messages"]:
                        print(f"\n[Tool/Final] {msg.content}\n")

    async def __call__(self, user_input: str) -> str:
        """Run the graph to completion and return the last message content."""
        if self.graph is None:
            self.build_graph()

        final_output = ""
        async for event in self.graph.astream(
            {"messages": [HumanMessage(content=user_input)]},
            config=RunnableConfig(),
        ):
            for node_update in event.values():
                if isinstance(node_update, dict) and "messages" in node_update:
                    for msg in node_update["messages"]:
                        final_output = msg.content  # only keep last
                        print(f"[Tool/LLM] {msg.content}")

        return final_output
|
app/Dockerfile
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use lightweight Python base image
|
| 2 |
+
FROM python:3.10-slim
|
| 3 |
+
|
| 4 |
+
# Set working directory
|
| 5 |
+
WORKDIR /app
|
| 6 |
+
|
| 7 |
+
# Copy app code and requirements
|
| 8 |
+
COPY requirements.txt .
|
| 9 |
+
COPY . .
|
| 10 |
+
|
| 11 |
+
# Install dependencies
|
| 12 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 13 |
+
|
| 14 |
+
# Expose HF Spaces port
|
| 15 |
+
EXPOSE 7860
|
| 16 |
+
|
| 17 |
+
# Run Streamlit
|
| 18 |
+
# CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.headless=true"]
|
| 19 |
+
CMD ["streamlit", "run", "app/streamlit_app.py", "--server.port=7860", "--server.headless=true"]
|
app/README.md
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Trip Advisor App
|
| 3 |
+
emoji: 🚀
|
| 4 |
+
colorFrom: red
|
| 5 |
+
colorTo: red
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 8501
|
| 8 |
+
tags:
|
| 9 |
+
- streamlit
|
| 10 |
+
pinned: false
|
| 11 |
+
short_description: AI trip advisor app
|
| 12 |
+
license: mit
|
| 13 |
+
---
|
| 14 |
+
|
| 15 |
+
# Welcome to Streamlit!
|
| 16 |
+
|
| 17 |
+
Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
|
| 18 |
+
|
| 19 |
+
If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
|
| 20 |
+
forums](https://discuss.streamlit.io).
|
app/requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
altair
|
| 2 |
+
pandas
|
| 3 |
+
streamlit
|
| 4 |
+
requests
|
app/streamlit_app.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
import requests
import datetime

# NOTE(review): this is the Space's *page* URL, not an API endpoint — HF
# Docker Spaces serve the app at <owner>-<space>.hf.space. Confirm the
# backend URL before deploying.
BASE_URL = "https://huggingface.co/spaces/mishrabp/trip-advisor-api"  # Backend endpoint

# Set up Streamlit page configuration
st.set_page_config(
    page_title="Travel Planner Agentic Application",
    page_icon="🌍",
    layout="wide",
    initial_sidebar_state="expanded",
)

# ---------------------------
# Sidebar Navigation
# ---------------------------
with st.sidebar:
    st.image("https://cdn-icons-png.flaticon.com/512/201/201623.png", width=80)
    st.title("🌍 Travel Planner")
    st.markdown("Plan your trips effortlessly with AI ✈️")
    st.markdown("---")
    st.subheader("Navigation")
    # Placeholder links — disabled until real pages exist.
    st.page_link("https://openai.com", label="🏠 Home", disabled=True)
    st.page_link("https://openai.com", label="🧳 My Trips", disabled=True)
    st.page_link("https://openai.com", label="⚙️ Settings", disabled=True)
    st.markdown("---")
    st.caption("Powered by Kube9t's Travel Agent AI")

# ---------------------------
# Hero Section
# ---------------------------
st.markdown(
    """
<div style="background-color:#0c2a45; padding:2rem 1rem; border-radius:10px;">
<h1 style="color:white; text-align:center;">🌍 Travel Planner Agentic Application</h1>
<p style="color:white; text-align:center; font-size:1.2rem;">
Let me help you design your next perfect trip — just tell me where you want to go!
</p>
</div>
""",
    unsafe_allow_html=True
)

st.markdown("### ✨ What can I help you plan today?")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Chat input form
with st.form(key="query_form", clear_on_submit=True):
    # Typo fixes in the example prompt: Washington DC / New York / Niagara.
    user_input = st.text_input("Ask me something like: 'Plan a 7 days trip to Washington DC, New York, and Niagara.'")
    submit_button = st.form_submit_button("Send")

# Handle form submission
if submit_button and user_input.strip():
    try:
        with st.spinner("🧠 Thinking..."):
            payload = {"question": user_input}
            # timeout added so a hung backend can't block the UI forever;
            # generation is slow, so allow several minutes.
            response = requests.post(f"{BASE_URL}/query", json=payload, timeout=300)

        if response.status_code == 200:
            answer = response.json().get("answer", "No answer returned.")
            markdown_content = f"""
### 🗺️ AI-Generated Travel Plan
**Generated:** {datetime.datetime.now().strftime('%Y-%m-%d at %H:%M')}
**Created by:** Kube9t's Travel Agent

---

{answer}

---

📝 *Please double-check all travel details, costs, and dates before booking.*
"""
            st.markdown(markdown_content)
        else:
            st.error("❌ Bot failed to respond: " + response.text)

    except Exception as e:
        st.error(f"⚠️ Something went wrong: {e}")
|
config/__init__.py
ADDED
|
File without changes
|
config/config.yaml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
llm:
|
| 2 |
+
openai:
|
| 3 |
+
provider: "openai"
|
| 4 |
+
model_name: "gpt-4o-mini"
|
| 5 |
+
groq:
|
| 6 |
+
provider: "groq"
|
| 7 |
+
model_name: "llama-3.1-70b-versatile"
|
| 8 |
+
azureopenai:
|
| 9 |
+
provider: "azureopenai"
|
| 10 |
+
model_name: "gpt-4o"
|
| 11 |
+
endpoint: "https://srepoc-ai-services.openai.azure.com/"
|
| 12 |
+
api_version: "2025-01-01-preview"
|
exception/__init__.py
ADDED
|
File without changes
|
exception/exception_handling.py
ADDED
|
File without changes
|
logger/__init__.py
ADDED
|
File without changes
|
logger/decorators.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def log_entry(func):
    """Decorator: log the function name and call arguments on every entry."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Render positional args with repr() and keyword args as k=v pairs.
        rendered = ", ".join(
            [repr(a) for a in args] + [f"{k}={v!r}" for k, v in kwargs.items()]
        )
        logger.info(f"➡️ Entering: {func.__name__}({rendered})")
        return func(*args, **kwargs)
    return wrapper
|
main.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from agent.agentic_workflow import GraphBuilder
from fastapi.responses import JSONResponse
from dotenv import load_dotenv
from pydantic import BaseModel
import datetime
from typing import List

load_dotenv()

app = FastAPI()

# CORS is wide open for development; tighten allow_origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Adjust for prod
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class QueryRequest(BaseModel):
    """Request payload for the /query endpoint."""
    question: str

@app.post("/query")
async def query_travel_agent(query: QueryRequest):
    """Run the agent graph on the user's question and return the final answer.

    Bug fix: ``graph.astream`` yields ``{node_name: update}`` dicts by
    default, so the original ``"messages" in event`` check never matched
    and every request answered "No response received.". Each node update
    is now unpacked before looking for messages.
    """
    try:
        graph_agent = GraphBuilder(model_provider="groq")
        # Build the graph once, up front, before streaming from it.
        graph = graph_agent.build_graph()

        streamed_response: List[str] = []
        async for event in graph.astream({"messages": [query.question]}):
            for node_update in event.values():
                if isinstance(node_update, dict) and "messages" in node_update:
                    for msg in node_update["messages"]:
                        streamed_response.append(msg.content)

        # The last streamed message is the agent's final answer.
        return {"answer": streamed_response[-1] if streamed_response else "No response received."}

    except Exception as e:
        # Surface the failure as a JSON 500 instead of an unhandled crash.
        return JSONResponse(status_code=500, content={"error": str(e)})
|
mainnew.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from agent.agentic_workflow import GraphBuilder
from utils.save_to_document import save_document
from fastapi.responses import JSONResponse
import datetime
from dotenv import load_dotenv
from pydantic import BaseModel
load_dotenv()


app = FastAPI()

# Allow all origins during development; restrict in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # set specific origins in prod
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class QueryRequest(BaseModel):
    """Request payload for /query."""
    question: str

@app.post("/query")
async def query_travel_agent(query: QueryRequest):
    """Build the agent graph, render its diagram, and answer the question."""
    try:
        print(query)
        graph = GraphBuilder(model_provider="groq")
        # Calling the builder invokes GraphBuilder.__call__, which compiles
        # and returns the graph (equivalent to graph.build_graph()).
        react_agent = graph()

        # Render the compiled graph topology to a PNG for inspection.
        png_graph = react_agent.get_graph().draw_mermaid_png()
        with open("my_graph.png", "wb") as f:
            f.write(png_graph)
        print(f"Graph saved as 'my_graph.png' in {os.getcwd()}")

        # LangGraph expects the state dict: {"messages": [<user question>]}
        messages = {"messages": [query.question]}
        output = react_agent.invoke(messages)

        # Extract the last AI message when the result carries a message list.
        if isinstance(output, dict) and "messages" in output:
            final_output = output["messages"][-1].content  # Last AI response
        else:
            final_output = str(output)

        return {"answer": final_output}
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
|
my_graph.png
ADDED
|
notebook/experiments.ipynb
ADDED
|
File without changes
|
prompt_library/__init__.py
ADDED
|
File without changes
|
prompt_library/prompt.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain_core.messages import SystemMessage

# System prompt injected ahead of every conversation by the agent node.
# (An earlier, shorter draft of this prompt was removed; see VCS history.)
SYSTEM_PROMPT = SystemMessage(
    content="""
You are a helpful and intelligent AI Travel Agent and Expense Planner.

Your job is to plan trips to any location worldwide by using real-time information through tools provided to you.

You must follow these steps:
1. **Understand the user's request**: Read the user's question carefully to determine their travel needs.
2. **Use tools to gather data**: Call the relevant tools to fetch real-time information about weather, attractions, costs, etc.
3. **Plan the trip**: Create a detailed travel plan based on the gathered data.
4. **Respond with a comprehensive plan**: Provide a complete travel itinerary in Markdown format.

Make sure your plan includes:
- 📅 Day-by-day itinerary
- 🏨 Recommended hotels with approximate cost per night
- 🗺️ Attractions with descriptions
- 🍽️ Recommended restaurants and pricing
- 🛶 Activities with timing and pricing
- 🚗 Transportation options available
- 🌤️ Weather details (use weather tool)
- 💰 Approximate per-day and total cost breakdown

### Rules:
- If the user asks a travel-related question, you MUST first call the relevant tools to gather data.
- If the tools do not return results, only then use prior knowledge.
- If the question is unrelated to travel, respond politely that you can’t help with that.

"""
)
|
pyproject.toml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "ai-trip-planner"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "A learning project that uses AI for Trip Planning."
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.11"
|
| 7 |
+
dependencies = []
|
requirements-local.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
langchain
|
| 2 |
+
langgraph
|
| 3 |
+
langchain-community
|
| 4 |
+
langchain-experimental
|
| 5 |
+
langchain-google-community
|
| 6 |
+
langchain-google-community[places]
|
| 7 |
+
langchain_tavily
|
| 8 |
+
langchain_groq
|
| 9 |
+
langchain_openai
|
| 10 |
+
fastapi
|
| 11 |
+
python-dotenv
|
| 12 |
+
streamlit
|
| 13 |
+
uvicorn
|
| 14 |
+
httpx
|
| 15 |
+
requests
|
| 16 |
+
azure-identity
|
| 17 |
+
mlflow
|
| 18 |
+
gradio
|
| 19 |
+
|
| 20 |
+
-e .
|
requirements.txt
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
langchain
|
| 2 |
+
langgraph
|
| 3 |
+
langchain-community
|
| 4 |
+
langchain-experimental
|
| 5 |
+
langchain-google-community
|
| 6 |
+
langchain-google-community[places]
|
| 7 |
+
langchain_tavily
|
| 8 |
+
langchain_groq
|
| 9 |
+
langchain_openai
|
| 10 |
+
fastapi
|
| 11 |
+
python-dotenv
|
| 12 |
+
streamlit
|
| 13 |
+
uvicorn
|
| 14 |
+
httpx
|
| 15 |
+
requests
|
| 16 |
+
azure-identity
|
| 17 |
+
mlflow
|
| 18 |
+
gradio
|
setup.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from setuptools import find_packages,setup
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
def get_requirements()->List[str]:
    """
    Return the dependency list declared in requirements.txt.

    Strips whitespace from every line and drops blank lines as well as the
    editable-install marker ``-e .`` so the result is suitable for
    setuptools' ``install_requires``.

    Returns:
        List[str]: Requirement strings; empty when requirements.txt is absent.
    """
    requirement_list: List[str] = []
    try:
        with open('requirements.txt', 'r') as req_file:
            # Keep each non-empty, stripped line except the editable marker.
            requirement_list = [
                req
                for raw in req_file
                if (req := raw.strip()) and req != '-e .'
            ]
    except FileNotFoundError:
        print("requirements.txt file not found.")
    return requirement_list
|
| 28 |
+
# NOTE(review): pyproject.toml names this project "ai-trip-planner" while
# setup() uses "AI-TRAVEL-PLANNER" — confirm which name is canonical.
# Fix: removed the debug `print(get_requirements())`, which ran on every
# import of setup.py and read requirements.txt a second time.
setup(
    name="AI-TRAVEL-PLANNER",
    version="0.0.1",
    author="Bibhu Mishra",
    author_email="bm80177@gmail.com",
    packages=find_packages(),
    # Dependencies come straight from requirements.txt (minus '-e .').
    install_requires=get_requirements()
)
|
tools/__init__.py
ADDED
|
File without changes
|
tools/arthmatic_op_tool.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
load_dotenv()
|
| 4 |
+
from langchain.tools import tool
|
| 5 |
+
from langchain_community.utilities.alpha_vantage import AlphaVantageAPIWrapper
|
| 6 |
+
from logger.decorators import log_entry
|
| 7 |
+
|
| 8 |
+
# The docstring below doubles as the LLM-facing tool description
# (LangChain's @tool reads it), so it is written for the agent, not just devs.
@tool
@log_entry  # decorator already logs entry; the print below is an extra console trace
def multiply(a: int, b: int) -> int:
    """
    Multiply two integers.

    This tool takes two integer values and returns their product.

    Args:
        a (int): The first integer.
        b (int): The second integer.

    Returns:
        int: The product of `a` and `b`.
    """
    print('Entered into multiply().')  # redundant with @log_entry — kept for console visibility
    return a * b
|
| 25 |
+
|
| 26 |
+
# The docstring below doubles as the LLM-facing tool description
# (LangChain's @tool reads it), so it is written for the agent, not just devs.
@tool
@log_entry  # decorator already logs entry; the print below is an extra console trace
def add(a: int, b: int) -> int:
    """
    Add two integers.

    This tool takes two integer values and returns their sum.

    Args:
        a (int): The first integer.
        b (int): The second integer.

    Returns:
        int: The sum of `a` and `b`.
    """
    print('Entered into add().')  # redundant with @log_entry — kept for console visibility
    return a + b
|
| 43 |
+
|
| 44 |
+
@tool
@log_entry
def currency_converter(from_curr: str, to_curr: str, value: float) -> float:
    """
    Convert a currency value from one currency to another using real-time exchange rates.

    This tool uses the AlphaVantage API to fetch real-time exchange rates and converts
    the given value from `from_curr` to `to_curr`.

    Args:
        from_curr (str): The currency code to convert from (e.g., "USD").
        to_curr (str): The currency code to convert to (e.g., "EUR").
        value (float): The numeric amount in the `from_curr` currency.

    Returns:
        float: The equivalent amount in `to_curr` currency based on the current exchange rate.
    """
    print('Entered into currency_converter().')
    # Bug fix: `os.environ[k] = os.getenv(k)` raised TypeError whenever the
    # variable was unset (environ values must be str). Validate explicitly
    # and fail with a clear message instead.
    api_key = os.getenv('ALPHAVANTAGE_API_KEY')
    if not api_key:
        raise ValueError("ALPHAVANTAGE_API_KEY environment variable is not set.")
    os.environ["ALPHAVANTAGE_API_KEY"] = api_key  # the wrapper reads the key from the environment
    alpha_vantage = AlphaVantageAPIWrapper()
    # NOTE(review): _get_exchange_rate is a private method of the wrapper —
    # confirm it is still exposed by the installed langchain-community version.
    response = alpha_vantage._get_exchange_rate(from_curr, to_curr)
    exchange_rate = response['Realtime Currency Exchange Rate']['5. Exchange Rate']
    return value * float(exchange_rate)
|
tools/currency_conversion_tool.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import List
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
from langchain.tools import tool
|
| 5 |
+
from logger.decorators import log_entry
|
| 6 |
+
from utils.currency_converter import CurrencyConverter
|
| 7 |
+
|
| 8 |
+
class CurrencyConverterTool:
    """
    A wrapper class that defines a LangChain-compatible tool for converting currencies
    using a custom CurrencyConverter service.

    Attributes:
        api_key (str): API key for the currency conversion service.
        currency_service (CurrencyConverter): Instance of the currency conversion service.
        currency_converter_tool_list (List): List of registered LangChain tools.
    """

    def __init__(self):
        """
        Initialize the CurrencyConverterTool with API credentials and tool setup.
        """
        load_dotenv()
        # NOTE(review): api_key is None when EXCHANGE_RATE_API_KEY is unset —
        # CurrencyConverter would then build an invalid URL; confirm the env
        # var is always provided in deployment.
        self.api_key = os.environ.get("EXCHANGE_RATE_API_KEY")
        self.currency_service = CurrencyConverter(self.api_key)
        self.currency_converter_tool_list = self._setup_tools()

    def _setup_tools(self) -> List:
        """
        Define and register the currency conversion tool.

        Returns:
            List: A list containing the currency conversion tool function.
        """

        # The nested function closes over self.currency_service. Its docstring
        # becomes the LLM-facing tool description (read by @tool), so it is
        # left exactly as written.
        @tool
        @log_entry
        def convert_currency(amount: float, from_currency: str, to_currency: str) -> float:
            """
            Convert an amount from one currency to another using real-time exchange rates.

            Args:
                amount (float): The amount of money to convert.
                from_currency (str): The currency code to convert from (e.g., "USD").
                to_currency (str): The currency code to convert to (e.g., "EUR").

            Returns:
                float: The equivalent amount in the target currency.
            """
            print('Entered into convert_currency().')
            return self.currency_service.convert(amount, from_currency, to_currency)

        return [convert_currency]
|
tools/expense_calculator_tool.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
from langchain.tools import tool
|
| 3 |
+
from logger.decorators import log_entry
|
| 4 |
+
from utils.expense_calculator import Calculator
|
| 5 |
+
|
| 6 |
+
class CalculatorTool:
    """
    A wrapper class for trip-related financial calculations using a custom Calculator utility.

    This class exposes several LangChain-compatible tools for estimating hotel costs,
    calculating total expenses, and determining daily budgets.
    """

    def __init__(self):
        """
        Initialize the CalculatorTool by creating an instance of Calculator and setting up tools.
        """
        self.calculator = Calculator()
        self.calculator_tool_list = self._setup_tools()

    def _setup_tools(self) -> List:
        """
        Define and register calculator tools related to trip planning.

        Returns:
            List: A list of LangChain tool functions for financial calculations.
        """

        # The nested functions' docstrings are the LLM-facing tool
        # descriptions (read by @tool), so they are left exactly as written.
        @tool
        @log_entry
        def estimate_total_hotel_cost(price_per_night: str, total_days: float) -> float:
            """
            Estimate the total hotel cost for a trip.

            Args:
                price_per_night (str): Cost per night for the hotel stay (may include currency symbol).
                total_days (float): Number of nights staying at the hotel.

            Returns:
                float: Total cost for the hotel stay.
            """
            print('Entered into estimate_total_hotel_cost().')
            # NOTE(review): price_per_night is typed str but Calculator.multiply
            # performs `a * b`; a str times a float raises TypeError — confirm
            # the agent always supplies a numeric value here.
            return self.calculator.multiply(price_per_night, total_days)

        @tool
        @log_entry
        def calculate_total_expense(*costs: float) -> float:
            """
            Calculate the total trip expense by summing up individual cost items.

            Args:
                *costs (float): A variable number of expense values (e.g., hotel, food, transport).

            Returns:
                float: The total combined expense.
            """
            print('Entered into calculate_total_expense().')
            # NOTE(review): variadic *costs may not map cleanly onto the tool's
            # argument schema — verify invocation through the agent framework.
            return self.calculator.calculate_total(*costs)

        @tool
        @log_entry
        def calculate_daily_expense_budget(total_cost: float, days: int) -> float:
            """
            Calculate the average daily budget for the trip.

            Args:
                total_cost (float): Total trip cost.
                days (int): Number of days in the trip.

            Returns:
                float: Estimated daily budget.
            """
            print('Entered into calculate_daily_expense_budget().')
            return self.calculator.calculate_daily_budget(total_cost, days)

        return [estimate_total_hotel_cost, calculate_total_expense, calculate_daily_expense_budget]
|
tools/place_search_tool.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import List
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
from langchain.tools import tool
|
| 5 |
+
from logger.decorators import log_entry
|
| 6 |
+
from utils.place_info_search import GooglePlaceSearchTool, TavilyPlaceSearchTool
|
| 7 |
+
|
| 8 |
+
class PlaceSearchTool:
    """
    A wrapper class that provides LangChain-compatible tools to search for
    attractions, restaurants, activities, and transportation options in a given place.

    It uses Google Places API primarily and falls back to Tavily search if needed.
    """

    def __init__(self):
        """
        Initialize the PlaceSearchTool with required API keys and setup tool functions.
        """
        load_dotenv()
        self.google_api_key = os.environ.get("GPLACES_API_KEY")
        self.google_places_search = GooglePlaceSearchTool(self.google_api_key)
        self.tavily_search = TavilyPlaceSearchTool()
        self.place_search_tool_list = self._setup_tools()

    def _setup_tools(self) -> List:
        """
        Setup and register all LangChain-compatible place search tools.

        Bug fix applied to every tool below: previously, when Google returned
        an empty/falsy result WITHOUT raising, the function fell off the end
        and returned None (violating its `-> str` contract). Each tool now
        falls back to Tavily in that case as well.

        Returns:
            List: A list of tool functions that can be invoked via LangChain agents.
        """

        # The nested functions' docstrings are the LLM-facing tool
        # descriptions (read by @tool), so they are left exactly as written.
        @tool
        @log_entry
        def search_attractions(place: str) -> str:
            """
            Search for popular attractions in a given place.

            Args:
                place (str): The name of the place (e.g., "New York").

            Returns:
                str: A list of suggested attractions retrieved from Google or Tavily.
            """
            print('Entered into search_attractions().')
            try:
                attraction_result = self.google_places_search.google_search_attractions(place)
                if attraction_result:
                    return f"Following are the attractions of {place} as suggested by Google: {attraction_result}"
            except Exception as e:
                tavily_result = self.tavily_search.tavily_search_attractions(place)
                return f"Google cannot find the details due to {e}.\nFollowing are the attractions of {place}: {tavily_result}"
            # Google answered but with nothing useful — fall back to Tavily.
            tavily_result = self.tavily_search.tavily_search_attractions(place)
            return f"Following are the attractions of {place}: {tavily_result}"

        @tool
        @log_entry
        def search_restaurants(place: str) -> str:
            """
            Search for restaurants in a given place.

            Args:
                place (str): The name of the place (e.g., "San Francisco").

            Returns:
                str: A list of suggested restaurants retrieved from Google or Tavily.
            """
            print('Entered into search_restaurants().')
            try:
                restaurants_result = self.google_places_search.google_search_restaurants(place)
                if restaurants_result:
                    return f"Following are the restaurants of {place} as suggested by Google: {restaurants_result}"
            except Exception as e:
                tavily_result = self.tavily_search.tavily_search_restaurants(place)
                return f"Google cannot find the details due to {e}.\nFollowing are the restaurants of {place}: {tavily_result}"
            # Google answered but with nothing useful — fall back to Tavily.
            tavily_result = self.tavily_search.tavily_search_restaurants(place)
            return f"Following are the restaurants of {place}: {tavily_result}"

        @tool
        @log_entry
        def search_activities(place: str) -> str:
            """
            Search for activities available in a given place.

            Args:
                place (str): The name of the place (e.g., "Chicago").

            Returns:
                str: A list of activities retrieved from Google or Tavily.
            """
            print('Entered into search_activities().')
            try:
                activities_result = self.google_places_search.google_search_activity(place)
                if activities_result:
                    return f"Following are the activities in and around {place} as suggested by Google: {activities_result}"
            except Exception as e:
                tavily_result = self.tavily_search.tavily_search_activity(place)
                return f"Google cannot find the details due to {e}.\nFollowing are the activities of {place}: {tavily_result}"
            # Google answered but with nothing useful — fall back to Tavily.
            tavily_result = self.tavily_search.tavily_search_activity(place)
            return f"Following are the activities of {place}: {tavily_result}"

        @tool
        @log_entry
        def search_transportation(place: str) -> str:
            """
            Search for transportation options available in a given place.

            Args:
                place (str): The name of the place (e.g., "Los Angeles").

            Returns:
                str: A list of transportation modes retrieved from Google or Tavily.
            """
            print('Entered into search_transportation().')
            try:
                transport_result = self.google_places_search.google_search_transportation(place)
                if transport_result:
                    return f"Following are the modes of transportation available in {place} as suggested by Google: {transport_result}"
            except Exception as e:
                tavily_result = self.tavily_search.tavily_search_transportation(place)
                return f"Google cannot find the details due to {e}.\nFollowing are the modes of transportation available in {place}: {tavily_result}"
            # Google answered but with nothing useful — fall back to Tavily.
            tavily_result = self.tavily_search.tavily_search_transportation(place)
            return f"Following are the modes of transportation available in {place}: {tavily_result}"

        return [search_attractions, search_restaurants, search_activities, search_transportation]
|
tools/weather_info_tool.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import List
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
from langchain.tools import tool
|
| 5 |
+
from logger.decorators import log_entry
|
| 6 |
+
from utils.weather_info import WeatherForecastTool
|
| 7 |
+
|
| 8 |
+
class WeatherInfoTool:
    """
    A wrapper class for retrieving weather information using the OpenWeatherMap API.

    This class provides LangChain-compatible tools to get current weather and weather forecasts
    for a given city.
    """

    def __init__(self):
        """
        Initialize the WeatherInfoTool with API credentials and set up weather-related tools.
        """
        load_dotenv()
        # Bug fix: the project's .env template declares OPENWEATHER_API_KEY,
        # but this code only read OPENWEATHERMAP_API_KEY, so the key was
        # always None. Read the documented name first, keeping the old one
        # as a backward-compatible fallback.
        self.api_key = (
            os.environ.get("OPENWEATHER_API_KEY")
            or os.environ.get("OPENWEATHERMAP_API_KEY")
        )
        self.weather_service = WeatherForecastTool(self.api_key)
        self.weather_tool_list = self._setup_tools()

    def _setup_tools(self) -> List:
        """
        Define and register weather-related LangChain tool functions.

        Returns:
            List: A list of LangChain-compatible weather tool functions.
        """

        # The nested functions' docstrings are the LLM-facing tool
        # descriptions (read by @tool), so they are left exactly as written.
        @tool
        @log_entry
        def get_current_weather(city: str) -> str:
            """
            Get the current weather information for a specified city.

            Args:
                city (str): The name of the city to get current weather data for.

            Returns:
                str: A string describing the current temperature and weather condition.
            """
            print('Entered into get_current_weather().')
            weather_data = self.weather_service.get_current_weather(city)
            if weather_data:
                # Defensive .get chain: an unexpected payload yields 'N/A'
                # instead of a KeyError bubbling into the agent.
                temp = weather_data.get('main', {}).get('temp', 'N/A')
                desc = weather_data.get('weather', [{}])[0].get('description', 'N/A')
                # NOTE(review): the °C label assumes the service requested
                # metric units — confirm utils.weather_info passes
                # units="metric" for the current-weather call.
                return f"Current weather in {city}: {temp}°C, {desc}"
            return f"Could not fetch weather for {city}"

        @tool
        @log_entry
        def get_weather_forecast(city: str) -> str:
            """
            Get the multi-day weather forecast for a specified city.

            Args:
                city (str): The name of the city to get the weather forecast for.

            Returns:
                str: A string listing daily temperatures and descriptions for upcoming forecasts.
            """
            print('Entered into get_weather_forecast().')
            forecast_data = self.weather_service.get_forecast_weather(city)
            if forecast_data and 'list' in forecast_data:
                forecast_summary = []
                for item in forecast_data['list']:
                    # 'dt_txt' is "YYYY-MM-DD HH:MM:SS"; keep the date part only.
                    date = item['dt_txt'].split(' ')[0]
                    temp = item['main']['temp']
                    desc = item['weather'][0]['description']
                    forecast_summary.append(f"{date}: {temp}°C, {desc}")
                return f"Weather forecast for {city}:\n" + "\n".join(forecast_summary)
            return f"Could not fetch forecast for {city}"

        return [get_current_weather, get_weather_forecast]
|
utils/__init__.py
ADDED
|
File without changes
|
utils/config_loader.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import yaml
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
def load_config(config_path: str = "config/config.yaml") -> dict:
    """Parse the YAML configuration file at *config_path* into a dict."""
    with open(config_path, "r") as cfg_file:
        return yaml.safe_load(cfg_file)
|
utils/currency_converter.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
|
| 3 |
+
class CurrencyConverter:
    """Thin client for the exchangerate-api.com v6 'latest rates' endpoint."""

    def __init__(self, api_key: str):
        # Bug fix: the base URL used to end with a trailing slash, so
        # convert() produced ".../latest//USD" (a double slash) when the
        # source currency was appended.
        self.base_url = f"https://v6.exchangerate-api.com/v6/{api_key}/latest"

    def convert(self, amount: float, from_currency: str, to_currency: str):
        """Convert the amount from one currency to another.

        Args:
            amount: Value expressed in `from_currency`.
            from_currency: ISO code of the source currency (e.g. "USD").
            to_currency: ISO code of the target currency (e.g. "EUR").

        Returns:
            The equivalent amount in `to_currency`.

        Raises:
            Exception: When the API call does not return HTTP 200.
            ValueError: When `to_currency` is absent from the returned rates.
        """
        url = f"{self.base_url}/{from_currency}"
        # Timeout keeps a dead endpoint from hanging the whole agent run.
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            raise Exception("API call failed:", response.json())
        rates = response.json()["conversion_rates"]
        if to_currency not in rates:
            raise ValueError(f"{to_currency} not found in exchange rates.")
        return amount * rates[to_currency]
|
utils/expense_calculator.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class Calculator:
    """Stateless arithmetic helpers used by the expense-calculation tools."""

    @staticmethod
    def multiply(a: int, b: int) -> int:
        """
        Multiply two integers.

        Args:
            a (int): The first integer.
            b (int): The second integer.

        Returns:
            int: The product of a and b.
        """
        product = a * b
        return product

    @staticmethod
    def calculate_total(*x: float) -> float:
        """
        Calculate sum of the given list of numbers

        Args:
            x (list): List of floating numbers

        Returns:
            float: The sum of numbers in the list x
        """
        # Accumulate starting from 0, matching sum()'s behavior exactly.
        total = 0
        for amount in x:
            total += amount
        return total

    @staticmethod
    def calculate_daily_budget(total: float, days: int) -> float:
        """
        Calculate daily budget

        Args:
            total (float): Total cost.
            days (int): Total number of days

        Returns:
            float: Expense for a single day
        """
        # Guard clause: zero or negative trip length yields a zero budget
        # rather than a ZeroDivisionError.
        if days <= 0:
            return 0
        return total / days
|
| 42 |
+
|
| 43 |
+
|
utils/model_loader.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
from utils.config_loader import load_config
|
| 4 |
+
from langchain_groq import ChatGroq
|
| 5 |
+
from langchain_openai import ChatOpenAI, AzureChatOpenAI
|
| 6 |
+
from azure.identity import AzureCliCredential, ManagedIdentityCredential
|
| 7 |
+
|
| 8 |
+
# Load .env file
|
| 9 |
+
load_dotenv()
|
| 10 |
+
|
| 11 |
+
class ConfigLoader:
    # Dict-like facade over config/config.yaml: supports cfg["llm"]["groq"] lookups.
    def __init__(self):
        print("Loading config...")
        # Parsed YAML mapping produced by utils.config_loader.load_config().
        self.config = load_config()

    def __getitem__(self, key):
        # Delegate subscript access straight to the parsed config mapping;
        # raises KeyError for unknown sections, same as a plain dict.
        return self.config[key]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class ModelLoader:
    """
    Build a LangChain chat-model client for the configured provider.

    Supported providers: "groq", "openai", "azureopenai". Model names and
    Azure endpoint details come from config/config.yaml via ConfigLoader.
    """

    def __init__(self, model_provider: str = "groq"):
        # Bug fix: load_llm() used to hard-code "groq" and silently ignore
        # this argument. The default is now "groq" so existing default
        # callers keep the same effective behavior, while explicit provider
        # choices are finally honored.
        print(f"Initializing ModelLoader with provider: {model_provider}")
        self.model_provider = model_provider.lower()
        self.config = ConfigLoader()

    def load_llm(self):
        """Instantiate and return the chat model for self.model_provider.

        Raises:
            ValueError: If the provider is unsupported or credentials are missing.
        """
        print(f"LLM loading from provider: {self.model_provider}")

        if self.model_provider == "groq":
            print("→ Using Groq")
            groq_api_key = os.getenv("GROQ_API_KEY")
            # Security fix: the raw API key was previously printed to stdout.
            if not groq_api_key:
                raise ValueError("GROQ_API_KEY environment variable is not set.")
            model_name = self.config["llm"]["groq"]["model_name"]
            return ChatGroq(model=model_name, api_key=groq_api_key)

        elif self.model_provider == "openai":
            print("→ Using OpenAI")
            openai_api_key = os.getenv("OPENAI_API_KEY")
            model_name = self.config["llm"]["openai"]["model_name"]
            return ChatOpenAI(model_name=model_name, api_key=openai_api_key)

        elif self.model_provider == "azureopenai":
            print("→ Using Azure OpenAI")
            # Prefer a managed identity when its client id is configured;
            # otherwise fall back to the developer's Azure CLI login.
            client_id = os.getenv("AZURE_MANAGED_IDENTITY_CLIENT_ID")
            if client_id and len(client_id) > 1:
                credential = ManagedIdentityCredential(client_id=client_id)
            else:
                credential = AzureCliCredential()

            token = credential.get_token("https://cognitiveservices.azure.com/.default").token
            if not token:
                raise ValueError("Azure token could not be retrieved.")
            # NOTE(review): a bearer token passed as api_key expires (~1 hour);
            # confirm long-running processes refresh it — an
            # azure_ad_token_provider callback would rotate it automatically.
            return AzureChatOpenAI(
                azure_endpoint=self.config["llm"]["azureopenai"]["endpoint"],
                azure_deployment=self.config["llm"]["azureopenai"]["model_name"],
                api_version=self.config["llm"]["azureopenai"]["api_version"],
                api_key=token
            )

        else:
            raise ValueError(f"Unsupported model provider: {self.model_provider}")
|
| 66 |
+
|
| 67 |
+
|
utils/place_info_search.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
from langchain_tavily import TavilySearch
|
| 4 |
+
from langchain_google_community import GooglePlacesTool, GooglePlacesAPIWrapper
|
| 5 |
+
|
| 6 |
+
class GooglePlaceSearchTool:
    """Runs travel-related free-text queries through the Google Places API."""

    def __init__(self, api_key: str):
        self.places_wrapper = GooglePlacesAPIWrapper(gplaces_api_key=api_key)
        self.places_tool = GooglePlacesTool(api_wrapper=self.places_wrapper)

    def _search(self, query: str) -> dict:
        """Run a single free-text query through the Google Places tool."""
        # Single point of contact with the API — all public methods funnel
        # through here (removes four copies of the same call).
        return self.places_tool.run(query)

    def google_search_attractions(self, place: str) -> dict:
        """
        Searches for attractions in the specified place using GooglePlaces API.
        """
        return self._search(f"top attractive places in and around {place}")

    def google_search_restaurants(self, place: str) -> dict:
        """
        Searches for available restaurants in the specified place using GooglePlaces API.
        """
        return self._search(f"what are the top 10 restaurants and eateries in and around {place}?")

    def google_search_activity(self, place: str) -> dict:
        """
        Searches for popular activities in the specified place using GooglePlaces API.
        """
        return self._search(f"Activities in and around {place}")

    def google_search_transportation(self, place: str) -> dict:
        """
        Searches for available modes of transportation in the specified place using GooglePlaces API.
        """
        return self._search(f"What are the different modes of transportations available in {place}")
|
| 34 |
+
|
| 35 |
+
class TavilyPlaceSearchTool:
    """Web-search fallback (Tavily) for place information when Google fails."""

    @staticmethod
    def _search(query: str):
        """Run *query* through TavilySearch, preferring its synthesized answer.

        Extracted helper: the four public methods previously duplicated this
        invoke-and-unwrap logic verbatim.
        """
        tavily_tool = TavilySearch(topic="general", include_answer="advanced")
        result = tavily_tool.invoke({"query": query})
        # When Tavily returns a dict with a non-empty "answer", that summary
        # is the most useful payload; otherwise pass the raw result through.
        if isinstance(result, dict) and result.get("answer"):
            return result["answer"]
        return result

    def tavily_search_attractions(self, place: str) -> dict:
        """
        Searches for attractions in the specified place using TavilySearch.
        """
        return self._search(f"top attractive places in and around {place}")

    def tavily_search_restaurants(self, place: str) -> dict:
        """
        Searches for available restaurants in the specified place using TavilySearch.
        """
        return self._search(f"what are the top 10 restaurants and eateries in and around {place}.")

    def tavily_search_activity(self, place: str) -> dict:
        """
        Searches for popular activities in the specified place using TavilySearch.
        """
        return self._search(f"activities in and around {place}")

    def tavily_search_transportation(self, place: str) -> dict:
        """
        Searches for available modes of transportation in the specified place using TavilySearch.
        """
        return self._search(f"What are the different modes of transportations available in {place}")
|
| 78 |
+
|
utils/save_to_document.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import datetime
|
| 3 |
+
|
| 4 |
+
def save_document(response_text: str, directory: str = "./output"):
    """Export travel plan to Markdown file with proper formatting.

    Args:
        response_text: Markdown body of the generated travel plan.
        directory: Target directory; created if it does not exist.

    Returns:
        str | None: Path of the written file, or None when writing failed.
    """
    os.makedirs(directory, exist_ok=True)

    # Create markdown content with metadata header
    markdown_content = f"""# 🌍 AI Travel Plan

# **Generated:** {datetime.datetime.now().strftime('%Y-%m-%d at %H:%M')}
# **Created by:** kube9t's Travel Agent

---

{response_text}

---

*This travel plan was generated by AI. Please verify all information, especially prices, operating hours, and travel requirements before your trip.*
"""

    try:
        # Generate a timestamp-based filename, then write with UTF-8 encoding.
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        filename = f"{directory}/AI_Trip_Planner_{timestamp}.md"

        with open(filename, 'w', encoding='utf-8') as f:
            f.write(markdown_content)

        # Bug fix: this message used to print a literal placeholder instead
        # of the actual path.
        print(f"Markdown file saved as: {filename}")
        return filename

    except Exception as e:
        # Best-effort export: report the problem and signal failure via None.
        print(f"Error saving markdown file: {e}")
        return None
|
utils/weather_info.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
|
| 3 |
+
class WeatherForecastTool:
    """Small OpenWeatherMap client for current conditions and short forecasts."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://api.openweathermap.org/data/2.5"

    def get_current_weather(self, place: str):
        """Get current weather of a place.

        Returns the decoded JSON payload, or {} on any non-200 response.
        """
        url = f"{self.base_url}/weather"
        params = {
            "q": place,
            "appid": self.api_key,
            # Bug fix: callers format this result as °C, but without this
            # flag OpenWeatherMap returns Kelvin. The forecast call below
            # already requested metric units; now both are consistent.
            "units": "metric",
        }
        # Timeout keeps a dead endpoint from hanging the whole agent run.
        response = requests.get(url, params=params, timeout=10)
        return response.json() if response.status_code == 200 else {}

    def get_forecast_weather(self, place: str):
        """Get weather forecast of a place.

        Returns the decoded JSON payload (up to 10 forecast slots),
        or {} on any non-200 response.
        """
        url = f"{self.base_url}/forecast"
        params = {
            "q": place,
            "appid": self.api_key,
            "cnt": 10,
            "units": "metric"
        }
        response = requests.get(url, params=params, timeout=10)
        return response.json() if response.status_code == 200 else {}
|