repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/services/db_service.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/services/db_service.py | """
Database service module for PostgreSQL connections using SQLAlchemy.
This module provides utilities for connecting to a PostgreSQL database
with SQLAlchemy, including connection pooling, session management,
and context managers for proper resource management.
"""
import os
from typing import Any, AsyncGenerator, Dict, Optional
from contextlib import asynccontextmanager
from sqlalchemy import text
from sqlalchemy.ext.asyncio import (
AsyncSession,
create_async_engine,
async_sessionmaker,
AsyncEngine
)
from loguru import logger
# Database connection string from environment variable
# Convert psycopg to SQLAlchemy format if needed
DATABASE_URL = os.getenv("DATABASE_URL", "")
if DATABASE_URL.startswith("postgresql://"):
# Convert to asyncpg format for SQLAlchemy async
DATABASE_URL = DATABASE_URL.replace("postgresql://", "postgresql+asyncpg://")
# Global engine and session factory
_engine: Optional[AsyncEngine] = None
_session_factory: Optional[async_sessionmaker[AsyncSession]] = None
async def initialize_db_pool(pool_size: int = 10, max_overflow: int = 20) -> None:
    """Initialize the SQLAlchemy async engine and session factory.

    Idempotent: returns immediately if the engine is already initialized.

    Args:
        pool_size: Pool size for the connection pool
        max_overflow: Maximum number of connections that can be created beyond the pool size

    Raises:
        Exception: Re-raises any error from engine creation or the initial
            connectivity check. On failure the module globals are reset so a
            later call can retry instead of seeing a half-built pool.
    """
    global _engine, _session_factory
    if _engine is not None:
        return
    logger.info("Initializing SQLAlchemy engine and session factory")
    try:
        _engine = create_async_engine(
            DATABASE_URL,
            echo=False,  # Set to True for SQL query logging
            pool_size=pool_size,
            max_overflow=max_overflow,
            pool_pre_ping=True,  # Verify connections before using them
        )
        _session_factory = async_sessionmaker(
            _engine,
            expire_on_commit=False,
            autoflush=False,
        )
        # Fail fast if the database is unreachable.
        async with _session_factory() as session:
            await session.execute(text("SELECT 1"))
        logger.info("SQLAlchemy engine and session factory initialized successfully")
    except Exception as e:
        logger.error(f"Failed to initialize SQLAlchemy engine and session factory: {e}")
        # Roll back partial initialization: without this, a failed connectivity
        # check leaves _engine set, and every subsequent call would return
        # early believing the pool is healthy.
        if _engine is not None:
            await _engine.dispose()
        _engine = None
        _session_factory = None
        raise
async def close_db_pool() -> None:
    """Close the SQLAlchemy engine and release all pooled connections.

    Safe to call when the pool was never initialized. Also clears the
    session factory so a later get_db_session() re-initializes the pool
    instead of handing out sessions bound to a disposed engine.
    """
    global _engine, _session_factory
    if _engine is not None:
        logger.info("Closing SQLAlchemy engine and connection pool")
        await _engine.dispose()
        _engine = None
        # Without this reset, get_db_session() would keep using a factory
        # bound to the disposed engine and every query would fail.
        _session_factory = None
@asynccontextmanager
async def get_db_session() -> AsyncGenerator[AsyncSession, None]:
    """Provide a managed SQLAlchemy async session.

    Lazily initializes the connection pool on first use. If the wrapped
    block raises, the session is rolled back and the error is re-raised.

    Returns:
        AsyncSession: SQLAlchemy async session

    Example:
        ```python
        async with get_db_session() as session:
            result = await session.execute(text("SELECT * FROM users"))
            users = result.fetchall()
        ```
    """
    # Lazy initialization: build the engine/factory on first use.
    if _session_factory is None:
        await initialize_db_pool()

    session = _session_factory()
    async with session:
        try:
            yield session
        except Exception as e:
            # Discard any pending changes before propagating the failure.
            await session.rollback()
            logger.error(f"Database session operation failed: {e}")
            raise
async def execute_query(query: str, params: Optional[Dict[str, Any]] = None) -> list:
    """Execute a database query and return results.

    Args:
        query: SQL query to execute (raw SQL)
        params: Query parameters (for parameterized queries)

    Returns:
        list: Query results; an empty list for statements that produce no rows

    Raises:
        Exception: Re-raises any execution error after logging it.

    Example:
        ```python
        results = await execute_query(
            "SELECT * FROM users WHERE email = :email",
            {"email": "user@example.com"}
        )
        ```
    """
    async with get_db_session() as session:
        try:
            result = await session.execute(text(query), params or {})
            try:
                return result.fetchall()
            except Exception:
                # No results to fetch — e.g. INSERT/UPDATE/DDL statements
                # return no rows; treat that as an empty result set.
                return []
        except Exception as e:
            logger.error(f"Failed to execute query: {e}")
raise | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/tools/scrape.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/tools/scrape.py | from firecrawl import FirecrawlApp, ScrapeOptions
import os
from agno.tools import tool
from loguru import logger
from config.logger import logger_hook
app = FirecrawlApp(api_key=os.getenv("FIRECRAWL_API_KEY"))
@tool(
    name="scrape_website",
    description="Scrape a website and return the markdown content.",
    tool_hooks=[logger_hook],
)
def scrape_website(url: str) -> str:
    """Fetch a web page via Firecrawl and return it rendered as markdown.

    Args:
        url (str): The URL of the website to scrape.

    Returns:
        str: The markdown content of the website.

    Example:
        >>> scrape_website("https://www.google.com")
        "## Google"
    """
    # wait_for gives JS-heavy pages up to 30s to render; the whole request
    # is capped at 60s.
    return app.scrape_url(
        url, formats=["markdown"], wait_for=30000, timeout=60000
    ).markdown
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/tools/google_flight.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/tools/google_flight.py | from fast_flights import FlightData, Passengers, Result, get_flights
from typing import Literal
from loguru import logger
from agno.tools import tool
from config.logger import logger_hook
@tool(name="get_flights", show_result=True, tool_hooks=[logger_hook])
def get_google_flights(
    departure: str,
    destination: str,
    date: str,
    trip: Literal["one-way", "round-trip"] = "one-way",
    adults: int = 1,
    children: int = 0,
    cabin_class: Literal["first", "business", "premium-economy", "economy"] = "economy",
) -> list:
    """
    Get flights from Google Flights

    :param departure: The departure airport code
    :param destination: The destination airport code
    :param date: The date of the flight in the format 'YYYY-MM-DD'
    :param trip: The type of trip (one-way, round-trip)
    :param adults: The number of adults (default 1)
    :param children: The number of children (default 0)
    :param cabin_class: The cabin class (first, business, premium-economy, economy)
    :return: The list of flights found, or an empty list if the lookup fails
    """
    logger.info(
        f"Getting flights from Google Flights for {departure} to {destination} on {date}"
    )
    try:
        result: Result = get_flights(
            flight_data=[
                FlightData(date=date, from_airport=departure, to_airport=destination)
            ],
            trip=trip,
            seat=cabin_class,
            passengers=Passengers(
                adults=adults, children=children, infants_in_seat=0, infants_on_lap=0
            ),
            # "fallback" lets fast_flights retry with an alternate fetch
            # strategy if the primary request fails — TODO confirm against
            # the installed fast_flights version.
            fetch_mode="fallback",
        )
        logger.info(f"Flights found: {result.flights}")
        # Return only the flight list (not the Result wrapper) so the
        # success path matches the [] returned on error. The previous
        # `-> Result` annotation was wrong: both paths return a list.
        return result.flights
    except Exception as e:
        # Deliberate best-effort: swallow lookup errors so the agent can
        # degrade gracefully instead of crashing the tool call.
        logger.error(f"Error getting flights from Google Flights: {e}")
        return []
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/tools/kayak_hotel.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/tools/kayak_hotel.py | from config.logger import logger_hook
from typing import Optional
from agno.tools import tool
from models.hotel import HotelSearchRequest
from loguru import logger
@tool(
    name="kayak_hotel_url_generator",
    show_result=True,
    tool_hooks=[logger_hook]
)
def kayak_hotel_url_generator(
    destination: str, check_in: str, check_out: str, adults: int = 1, children: int = 0, rooms: int = 1, sort: str = "recommended"
) -> str:
    """
    Generates a Kayak URL for hotels in the specified destination between check_in and check_out dates.

    :param destination: The destination city or area (e.g. "Berlin", "City Center, Singapore", "Red Fort, Delhi")
    :param check_in: The date of check-in in the format 'YYYY-MM-DD'
    :param check_out: The date of check-out in the format 'YYYY-MM-DD'
    :param adults: The number of adults (default 1)
    :param children: The number of children (default 0)
    :param rooms: The number of rooms (default 1)
    :param sort: The sort order (recommended, distance, price, rating)
    :return: The Kayak URL for the hotel search
    """
    from urllib.parse import quote

    # Build the request model first — presumably it validates the inputs
    # (see models.hotel); verify against HotelSearchRequest's definition.
    request = HotelSearchRequest(
        destination=destination,
        check_in=check_in,
        check_out=check_out,
        adults=adults,
        children=children,
        rooms=rooms,
        sort=sort)
    logger.info(f"Request: {request}")
    logger.info(f"Generating Kayak URL for {destination} on {check_in} to {check_out}")
    # Percent-encode the destination: documented inputs such as
    # "City Center, Singapore" contain spaces and commas, which are not
    # valid raw characters in a URL path segment.
    URL = f"https://www.kayak.com/hotels/{quote(destination)}/{check_in}/{check_out}"
    URL += f"/{adults}adults"
    if children > 0:
        URL += f"/{children}children"
    if rooms > 1:
        URL += f"/{rooms}rooms"
    URL += "?currency=USD"
    # "recommended" is Kayak's default ordering and needs no sort flag.
    if sort.lower() == "price":
        URL += "&sort=price_a"
    elif sort.lower() == "rating":
        URL += "&sort=userrating_b"
    elif sort.lower() == "distance":
        URL += "&sort=distance_a"
    logger.info(f"URL: {URL}")
    return URL
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/tools/kayak_flight.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/tools/kayak_flight.py | from config.logger import logger_hook
from typing import Optional
from agno.tools import tool
from models.flight import FlightSearchRequest
from loguru import logger
@tool(
    name="kayak_flight_url_generator",
    show_result=True,
    tool_hooks=[logger_hook]
)
def kayak_flight_url_generator(
    departure: str, destination: str, date: str, return_date: Optional[str] = None, adults: int = 1, children: int = 0, cabin_class: Optional[str] = None, sort: str = "best"
) -> str:
    """
    Generates a Kayak URL for flights between departure and destination on the specified date.

    :param departure: The IATA code for the departure airport (e.g., 'SOF' for Sofia)
    :param destination: The IATA code for the destination airport (e.g., 'BER' for Berlin)
    :param date: The date of the flight in the format 'YYYY-MM-DD'
    :param return_date: Only for two-way tickets. The date of return flight in the format 'YYYY-MM-DD'
    :param adults: The number of adults (default 1)
    :param children: The number of children (default 0)
    :param cabin_class: The cabin class (first, business, premium, economy)
    :param sort: The sort order (best, cheapest)
    :return: The Kayak URL for the flight search
    """
    # Build the request model first — presumably it validates the inputs
    # (see models.flight); verify against FlightSearchRequest's definition.
    request = FlightSearchRequest(
        departure=departure,
        destination=destination,
        date=date,
        return_date=return_date,
        adults=adults,
        children=children,
        cabin_class=cabin_class,
        sort=sort)
    logger.info(f"Request: {request}")
    logger.info(f"Generating Kayak URL for {departure} to {destination} on {date}")
    URL = f"https://www.kayak.com/flights/{departure}-{destination}/{date}"
    if return_date:
        URL += f"/{return_date}"
    # Economy is Kayak's default cabin class and is omitted from the path.
    if cabin_class and cabin_class.lower() != "economy":
        URL += f"/{cabin_class.lower()}"
    URL += f"/{adults}adults"
    if children > 0:
        # Kayak encodes children as "/children" followed by one "-11" age
        # placeholder per child, e.g. "/children-11-11" for two children.
        # (Was an f-string without placeholders plus a manual repeat loop.)
        URL += "/children" + "-11" * children
    URL += "?currency=USD"
    if sort.lower() == "cheapest":
        URL += "&sort=price_a"
    elif sort.lower() == "best":
        URL += "&sort=bestflight_a"
    logger.info(f"URL: {URL}")
    return URL
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/food.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/food.py | from agno.tools.exa import ExaTools
from config.llm import model
from agno.agent import Agent
# Team member: culinary research specialist. Uses Exa web search to find
# restaurants, food markets, and dining experiences. It only acts when the
# team leader delegates a dining-related task to it.
dining_agent = Agent(
    name="Culinary Guide",
    role="Research dining and food experiences when asked by team leader",
    model=model,
    tools=[ExaTools()],
    description="You research restaurants, food markets, culinary experiences, and dining options when assigned by the team leader.",
    # Markdown task list the model follows step by step for each request.
    instructions=[
        "# Culinary Research and Recommendation Assistant",
        "",
        "## Task 1: Query Processing",
        "- Parse dining preferences from user query",
        "- Extract:",
        " - Location/area",
        " - Cuisine preferences",
        " - Dietary restrictions",
        " - Budget range",
        " - Meal timing",
        " - Group size",
        " - Special requirements (e.g., family-friendly, romantic)",
        "",
        "## Task 2: Research & Data Collection",
        "- Search for restaurants and food experiences using ExaTools",
        "- Gather information about:",
        " - Local cuisine specialties",
        " - Popular food markets",
        " - Culinary experiences",
        " - Operating hours",
        " - Price ranges",
        " - Reservation policies",
        "",
        "## Task 3: Content Analysis",
        "- Analyze restaurant reviews and ratings",
        "- Evaluate:",
        " - Food quality",
        " - Service standards",
        " - Ambiance",
        " - Value for money",
        " - Dietary accommodation",
        " - Family-friendliness",
        "",
        "## Task 4: Data Processing",
        "- Filter results based on:",
        " - Dietary requirements",
        " - Budget constraints",
        " - Location preferences",
        " - Special requirements",
        "- Validate information completeness",
        "",
        "## Task 5: Results Presentation",
        "Present recommendations in a clear, organized format:",
        "",
        "### Restaurant Recommendations",
        "For each restaurant, include:",
        "- Name and cuisine type",
        "- Price range (e.g., $, $$, $$$)",
        "- Rating and brief review summary",
        "- Location and accessibility",
        "- Operating hours",
        "- Dietary options available",
        "- Special features (e.g., outdoor seating, view)",
        "- Reservation requirements",
        "- Popular dishes to try",
        "",
        "### Food Markets & Culinary Experiences",
        "- Market names and specialties",
        "- Best times to visit",
        "- Must-try local foods",
        "- Cultural significance",
        "",
        "### Additional Information",
        "- Local food customs and etiquette",
        "- Peak dining hours to avoid",
        "- Transportation options",
        "- Food safety tips",
        "",
        "Format the output in clear sections with emojis and bullet points for better readability.",
    ],
    # Template the final answer must follow (shown to the model verbatim).
    expected_output="""
Present dining recommendations in a clear, organized format with the following sections:
# 🍽️ Restaurant Recommendations
For each recommended restaurant:
- Name and cuisine type
- Price range and value rating
- Location and accessibility
- Operating hours
- Dietary options
- Special features
- Popular dishes
- Reservation info
# 🛍️ Food Markets & Experiences
- Market names and specialties
- Best visiting times
- Local food highlights
- Cultural significance
# ℹ️ Additional Information
- Local customs
- Peak hours
- Transportation
- Safety tips
Use emojis and clear formatting for better readability.
""",
    markdown=True,
    show_tool_calls=True,
    debug_mode=True,
    # Retry transient model/tool failures: 3 attempts, 2s base delay,
    # exponential backoff between attempts.
    retries=3,
    delay_between_retries=2,
    exponential_backoff=True,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/budget.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/budget.py | from agno.agent import Agent
from config.llm import model
# Team member: budget analysis specialist. Has no web tools — it reasons
# over cost data gathered by the other agents and proposes optimizations
# when the team leader delegates budgeting work to it.
budget_agent = Agent(
    name="Budget Optimizer",
    role="Calculate costs and optimize travel budgets when asked by team leader",
    model=model,
    description="You research costs, compare prices, and optimize travel budgets when assigned by the team leader. When plans exceed budget, you suggest strategic adjustments to bring costs in line while preserving the core travel experience.",
    # Step-by-step optimization procedure shown to the model verbatim.
    instructions=[
        "# Budget Optimization Instructions",
        "",
        "1. Analyze total budget and cost requirements:",
        " - Review total budget limit",
        " - Calculate costs for transportation, accommodations, activities, food",
        " - Identify any components exceeding budget",
        "",
        "2. If over budget, suggest cost-saving alternatives:",
        " - Alternative accommodations or locations",
        " - Different transportation options",
        " - Mix of premium and budget experiences",
        " - Free or lower-cost activity substitutes",
        " - Budget-friendly dining recommendations",
        "",
        "3. Research and recommend money-saving strategies:",
        " - Early booking discounts",
        " - Package deals",
        " - Off-peak pricing",
        " - Local passes and discount cards",
        "",
        "4. Present clear budget breakdown showing:",
        " - Original vs optimized costs",
        " - Specific savings per category",
        " - Alternative options",
        " - Hidden cost warnings",
        "",
        "Format all amounts in user's preferred currency with clear comparisons between original and optimized budgets.",
    ],
    markdown=True,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/destination.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/destination.py | from agno.agent import Agent
from agno.tools.exa import ExaTools
from agno.tools.firecrawl import FirecrawlTools
from config.llm import model
# Team member: destination research specialist. Uses Exa search (capped at
# 10 results per query) to surface mainstream, widely appealing attractions
# rather than niche recommendations.
destination_agent = Agent(
    name="Destination Explorer",
    model=model,
    tools=[
        ExaTools(
            num_results=10,
        ),
    ],
    description="You are a destination research agent that focuses on recommending mainstream tourist attractions and classic experiences that most travelers would enjoy. You prioritize well-known landmarks and popular activities while keeping recommendations general and widely appealing.",
    instructions=[
        "1. Focus on mainstream attractions with thoughtful guidance:",
        " - Famous landmarks and monuments",
        " - Popular tourist spots",
        " - Well-known museums",
        " - Classic shopping areas",
        " - Common tourist activities",
        "",
        "2. Guide visitors with simple reasoning:",
        " - Suggest crowd-pleasing activities",
        " - Focus on family-friendly locations",
        " - Recommend proven tourist routes",
        " - Include popular photo spots",
        "",
        "3. Present clear attraction information:",
        " - Simple description",
        " - General location",
        " - Regular opening hours",
        " - Standard entrance fees",
        " - Typical visit duration",
        " - Basic visitor tips",
        "",
        "4. Organize information logically:",
        " - Main attractions first",
        " - Common day trips",
        " - Standard tourist areas",
        " - Popular activities",
        "",
        "Use tools to find and verify tourist information.",
        "Keep suggestions general and widely appealing.",
    ],
    # Template the final answer must follow (shown to the model verbatim).
    expected_output="""
# Tourist Guide
## Main Attractions
List of most popular tourist spots
## Common Activities
Standard tourist activities and experiences
## Popular Areas
Well-known districts and neighborhoods
## Basic Information
- General visiting tips
- Common transportation options
- Standard tourist advice
""",
    markdown=True,
    show_tool_calls=True,
    # Inject the current date/time so relative queries ("this weekend",
    # "open today") can be resolved by the model.
    add_datetime_to_instructions=True,
    # Retry transient model/tool failures with exponential backoff.
    retries=3,
    delay_between_retries=2,
    exponential_backoff=True,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/flight.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/flight.py | from agno.agent import Agent
from agno.tools.firecrawl import FirecrawlTools
from tools.google_flight import get_google_flights
from config.llm import model
# Team member: flight search specialist. Currently backed only by the
# fast-flights Google Flights tool; the commented-out Kayak/Firecrawl
# scraping pipeline is an earlier alternative kept for reference.
flight_search_agent = Agent(
    name="Flight Search Assistant",
    model=model,
    tools=[
        # FirecrawlTools(poll_interval=10),
        # kayak_flight_url_generator,
        get_google_flights,
    ],
    instructions=[
        "You are a sophisticated flight search and analysis assistant for comprehensive travel planning. For any user query:",
        "1. Parse complete flight requirements including:",
        " - Origin and destination cities",
        " - Travel dates (outbound and return)",
        " - Number of travelers (adults, children, infants)",
        " - Preferred cabin class",
        " - Any specific airlines or routing preferences",
        " - Budget constraints if specified",
        # "2. Search and analyze multiple flight options:",
        "2. Search for flight options:",
        # " - Use kayak_url_generator to create appropriate search URLs",
        # " - Navigate to and extract data from flight search results",
        " - Use get_google_flights to get flight results",
        " - Consider both direct and connecting flights",
        " - Compare different departure times and airlines",
        "3. For each viable flight option, extract:",
        " - Complete pricing breakdown (base fare, taxes, total)",
        " - Flight numbers and operating airlines",
        " - Detailed timing (departure, arrival, duration, layovers)",
        " - Aircraft types and amenities when available",
        " - Baggage allowance and policies",
        "4. Organize and present options with focus on:",
        " - Best value for money",
        " - Convenient timing and minimal layovers",
        " - Reliable airlines with good service records",
        " - Flexibility and booking conditions",
        "5. Provide practical recommendations considering:",
        " - Price trends and booking timing",
        " - Alternative dates or nearby airports if beneficial",
        " - Loyalty program benefits if applicable",
        " - Special requirements (extra legroom, dietary, etc.)",
        "6. Include booking guidance:",
        " - Direct booking links when available",
        " - Fare rules and change policies",
        " - Required documents and visa implications",
        # "7. Always close browser sessions after completion",
    ],
    # Field list the final answer must report for each flight.
    expected_output="""
All flight details with the following fields:
- flight_number (str): The flight number of the flight
- price (str): The price of the flight
- airline (str): The airline of the flight
- departure_time (str): The departure time of the flight
- arrival_time (str): The arrival time of the flight
- duration (str): The duration of the flight
- stops (int): The number of stops of the flight
""",
    markdown=True,
    show_tool_calls=True,
    debug_mode=True,
    # Retry transient model/tool failures with exponential backoff.
    retries=3,
    delay_between_retries=2,
    exponential_backoff=True,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/hotel.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/hotel.py | from agno.agent import Agent
from tools.kayak_hotel import kayak_hotel_url_generator
from tools.scrape import scrape_website
from config.llm import model
from models.hotel import HotelResult, HotelResults
# Team member: hotel search specialist. Generates a Kayak hotel-search URL
# with kayak_hotel_url_generator, then scrapes the results page through the
# Firecrawl-backed scrape_website tool and extracts structured listings.
hotel_search_agent = Agent(
    name="Hotel Search Assistant",
    model=model,
    tools=[
        scrape_website,
        kayak_hotel_url_generator,
    ],
    instructions=[
        "# Hotel Search and Data Extraction Assistant",
        "",
        "## Task 1: Query Processing",
        "- Parse hotel search parameters from user query",
        "- Extract:",
        " - Destination",
        " - Check-in/out dates",
        " - Number of guests (adults, children)",
        " - Room requirements",
        " - Budget constraints",
        " - Preferred amenities",
        " - Location preferences",
        "",
        "## Task 2: URL Generation & Initial Scraping",
        "- Generate Kayak URL using `kayak_hotel_url_generator`",
        "- Perform initial content scrape with `scrape_website`",
        "- Handle URL encoding for special characters in destination names",
        "",
        "## Task 3: Data Extraction",
        "- Parse hotel listings from scraped content",
        "- Extract key details:",
        " - Prices (including taxes and fees)",
        " - Amenities (especially family-friendly features)",
        " - Ratings and reviews",
        " - Location details",
        " - Room types and availability",
        " - Cancellation policies",
        "- Handle dynamic loading of results",
        "- Navigate multiple pages if needed",
        "",
        "## Task 4: Data Processing",
        "- Structure extracted hotel data according to HotelResult model",
        "- Validate data completeness",
        "- Filter results based on:",
        " - Budget constraints",
        " - Required amenities",
        " - Location preferences",
        " - Family-friendly features",
        "",
        "## Task 5: Results Presentation",
        "- Format results clearly with:",
        " - Hotel name and rating",
        " - Price breakdown",
        " - Location and accessibility",
        " - Key amenities",
        " - Family-friendly features",
        " - Booking policies",
        "- Sort results by relevance to user preferences",
        "- Include direct booking links",
        "",
    ],
    # Field list the final answer must report for each hotel.
    expected_output="""
List of hotels with the following fields for each hotel:
- hotel_name (str): The name of the hotel
- price (str): The price of the hotel
- rating (str): The rating of the hotel
- address (str): The address of the hotel
- amenities (List[str]): The amenities of the hotel
- description (str): The description of the hotel
- url (str): The url of the hotel
""",
    markdown=True,
    show_tool_calls=True,
    debug_mode=True,
    # Retry transient model/tool failures with exponential backoff.
    retries=3,
    delay_between_retries=2,
    exponential_backoff=True,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/team.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/team.py | from agno.team.team import Team
from config.llm import model, model2
from agents.destination import destination_agent
from agents.hotel import hotel_search_agent
from agents.food import dining_agent
from agents.budget import budget_agent
from agents.flight import flight_search_agent
from agents.itinerary import itinerary_agent
from loguru import logger
from agno.tools.reasoning import ReasoningTools
# def update_team_current_state(team: Team, state: str) -> str:
# """
# This function is used to set the current state of the team.
# """
# logger.info(f"The current state of the team is {state}")
# team.session_state["current_state"] = state
# return state
trip_planning_team = Team(
name="TripCraft AI Team",
mode="coordinate",
model=model,
tools=[ReasoningTools(add_instructions=True)],
members=[
destination_agent,
hotel_search_agent,
dining_agent,
budget_agent,
flight_search_agent,
itinerary_agent,
],
show_tool_calls=True,
markdown=True,
description=(
"You are the lead orchestrator of the TripCraft AI planning team. "
"Your mission is to transform the user's travel preferences into a magical, stress-free itinerary. "
"Based on a single input form, you'll collaborate with expert agents handling flights, stays, dining, activities, and budgeting. "
"The result should be a beautifully crafted, practical, and emotionally resonant travel plan that feels personally designed. "
"Every detail matters - from the exact timing of activities to the ambiance of recommended restaurants. "
"Your goal is to create an itinerary so thorough and thoughtful that it feels like having a personal travel concierge."
),
instructions=[
"1. Meticulously analyze the complete travel preferences from the user input:",
" - Primary destination and any secondary locations",
" - Exact travel dates including arrival and departure times",
" - Preferred pace (relaxed, moderate, or fast-paced) with specific timing preferences",
" - Travel style (luxury, mid-range, budget) with detailed expectations",
" - Budget range with currency and flexibility notes",
" - Companion details (solo, couple, family, friends) with group dynamics",
" - Accommodation requirements (room types, amenities, location preferences)",
" - Desired vibes (romantic, adventurous, relaxing, etc.) with specific examples",
" - Top priorities (Instagram spots, local experiences, food, shopping) ranked by importance",
" - Special interests, dietary restrictions, accessibility needs",
" - Previous travel experiences and preferences",
"",
"2. Transportation Planning:",
" - Map out exact routes from start location to all destinations",
" - Research optimal flight/train combinations considering:",
" • Departure/arrival times aligned with check-in/out times",
" • Layover durations and airport transfer times",
" • Airline alliance benefits and baggage policies",
" • Alternative airports and routes for cost optimization",
" - Plan local transportation between all points of interest",
"",
"3. Coordinate with Specialized Agents:",
" - Flight Agent: Detailed air travel options with timing and pricing",
" - Hotel Agent: Accommodation matches for each night with amenity details",
" - Dining Agent: Restaurant recommendations with cuisine, price, and ambiance",
" - Activity Agent: Curated experiences matching interests and pace",
" - Budget Agent: Cost optimization while maintaining experience quality",
"",
"4. Create Detailed Daily Schedules:",
" Morning (6am-12pm):",
" - Breakfast venues with opening hours and signature dishes",
" - Morning activities with exact durations and travel times",
" - Alternative options for weather contingencies",
"",
" Afternoon (12pm-6pm):",
" - Lunch recommendations with peak times and reservation needs",
" - Main sightseeing with entrance fees and skip-the-line options",
" - Rest periods aligned with pace preference",
"",
" Evening (6pm-midnight):",
" - Dinner venues with ambiance descriptions and dress codes",
" - Evening entertainment options",
" - Nightlife suggestions if requested",
"",
"5. Experience Enhancement:",
" - Research and highlight hidden gems matching user interests",
" - Identify unique local experiences with cultural significance",
" - Find Instagram-worthy locations with best photo times",
" - Source exclusive or unusual accommodation options",
" - Map romantic spots for couples or family-friendly venues",
"",
"6. Budget Management:",
" - Break down costs to the smallest detail:",
" • Transportation (flights, trains, taxis, public transit)",
" • Accommodations (nightly rates, taxes, fees)",
" • Activities (tickets, guides, equipment rentals)",
" • Meals (by venue type and meal time)",
" • Shopping allowance",
" • Emergency buffer",
" - Provide cost-saving alternatives while maintaining experience quality",
" - Consider seasonal pricing variations",
"",
"7. Research Tools Usage:",
" - Use Exa for deep destination research including:",
" • Seasonal events and festivals",
" • Local customs and etiquette",
" • Weather patterns and best visit times",
" - Employ Firecrawl for real-time data on:",
" • Venue reviews and ratings",
" • Current pricing and availability",
" • Booking platforms and deals",
"",
"8. Personalization Elements:",
" - Reference and incorporate past travel experiences",
" - Avoid previously visited locations unless requested",
" - Match recommendations to stated preferences",
" - Add personal touches based on special occasions or interests",
"",
"9. Final Itinerary Crafting:",
" - Ensure perfect flow between all elements",
" - Include buffer time for transitions",
" - Add local tips and insider knowledge",
" - Provide backup options for key elements",
" - Format for both inspiration and practical use",
],
expected_output="""
A meticulously detailed, day-by-day travel itinerary in Markdown format including:
**I. Executive Summary**
- 🎯 Trip Purpose & Vision
• Primary goals and desired experiences
• Special occasions or celebrations
• Key preferences and must-haves
- ✈️ Travel Overview
• Exact dates with day count
• All destinations in sequence
• Group composition and dynamics
• Overall style and pace
• Total budget range and currency
- 💫 Experience Highlights
• Signature moments and unique experiences
• Special arrangements and exclusives
• Instagram-worthy locations
• Cultural immersion opportunities
**II. Travel Logistics**
- 🛫 Outbound Journey
• Flight/train details with exact timings
• Carrier information and booking references
• Seat recommendations
• Baggage allowances and restrictions
• Airport/station transfer details
• Check-in instructions
- 🛬 Return Journey
• Return transportation specifics
• Timing coordination with checkout
• Alternative options if available
**III. Detailed Daily Itinerary**
For each day (e.g., "Day 1 - Monday, July 1, 2025"):
- 🌅 Morning (6am-12pm)
• Wake-up time and morning routine
• Breakfast venue with menu highlights
• Morning activities with durations
• Transport between locations
• Tips for timing and crowds
- ☀️ Afternoon (12pm-6pm)
• Lunch recommendations with price range
• Main activities and experiences
• Rest periods and flexibility
• Photo opportunities
• Indoor/outdoor alternatives
- 🌙 Evening (6pm-onwards)
• Dinner reservations and details
• Evening entertainment
• Nightlife options if desired
• Transport back to accommodation
- 🏨 Accommodation
• Property name and room type
• Check-in/out times
• Key amenities and features
• Location benefits
• Booking confirmation details
- 📝 Daily Notes
• Weather considerations
• Dress code requirements
• Advance bookings needed
• Local customs and tips
• Emergency contacts
**IV. Accommodation Details**
For each property:
- 📍 Location & Access
• Exact address and coordinates
• Transport options and costs
• Surrounding area highlights
• Distance to key attractions
- 🛎️ Property Features
• Room types and views
• Included amenities
• Dining options
• Special services
• Unique selling points
- 💰 Costs & Booking
• Nightly rates and taxes
• Additional fees
• Cancellation policy
• Payment methods
• Booking platform links
**V. Curated Experiences**
- 🎭 Activities & Attractions
• Name and description
• Operating hours and duration
• Admission fees
• Booking requirements
• Insider tips
• Alternative options
• Accessibility notes
- 🍽️ Dining Experiences
• Restaurant details and cuisine
• Price ranges and menu highlights
• Ambiance and dress code
• Reservation policies
• Signature dishes
• Dietary accommodation
• View/seating recommendations
**VI. Comprehensive Budget**
- 💵 Total Trip Cost
• Grand total in user's currency
• Exchange rates used
• Payment timeline
- 📊 Detailed Breakdown
• Transportation
- Flights/trains
- Local transport
- Airport transfers
• Accommodations
- Nightly rates
- Taxes and fees
- Extra services
• Activities
- Admission fees
- Guide costs
- Equipment rental
• Dining
- Breakfast allowance
- Lunch budget
- Dinner budget
- Drinks/snacks
• Shopping & Souvenirs
• Emergency Fund
• Optional Upgrades
**VII. Essential Information**
- 📋 Pre-Trip Preparation
• Visa requirements
• Health and insurance
• Packing recommendations
• Weather forecasts
• Currency exchange tips
- 🗺️ Destination Guide
• Local customs and etiquette
• Language basics
• Emergency contacts
• Medical facilities
• Shopping areas
• Local transport options
- 📱 Digital Resources
• Useful apps
• Booking confirmations
• Maps and directions
• Restaurant reservations
• Activity tickets
- ⚠️ Contingency Plans
• Weather alternatives
• Backup restaurants
• Emergency contacts
• Travel insurance details
• Cancellation policies
Format the entire itinerary with:
• Clear section headers
• Consistent emoji usage
• Bullet points and sub-bullets
• Tables where appropriate
• Highlighted important information
• Links to all bookings and reservations
• Day-specific weather forecasts
• Local emergency numbers
• Relevant photos and maps
""",
success_criteria=[
"✅ Complete itinerary with all travel days and activities",
"✅ Stays within budget constraints",
"✅ Matches user priorities and travel style",
"✅ Well-structured daily schedule matching user's pace",
"✅ Real flights and accommodations with costs and links",
"✅ Daily activities aligned with selected vibes",
"✅ Clear Markdown format with good visuals",
"✅ Realistic budget breakdown",
"✅ Personalized tips based on user profile",
"✅ Verified, real-world locations only",
],
enable_agentic_context=True,
share_member_interactions=True,
show_members_responses=True,
add_datetime_to_instructions=True,
add_member_tools_to_system_message=True,
# debug_mode=True,
telemetry=False,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/structured_output.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/structured_output.py | from typing import TypeVar, Type, Any
from pydantic import BaseModel
from agno.agent import Agent
from loguru import logger
from config.llm import model
import json
import re
from pydantic import ValidationError
T = TypeVar("T", bound=BaseModel)
def clean_json_string(json_str: str) -> str:
    """
    Clean a JSON string by stripping markdown code fences and whitespace.

    LLM responses often wrap JSON in ```json ... ``` fences; this removes
    the fences and keeps only the fenced content. If fence-stripping leaves
    nothing (e.g. an empty fence), the original text is returned (stripped)
    so callers still see what the model actually produced.

    Args:
        json_str (str): The JSON string to clean

    Returns:
        str: The cleaned JSON string
    """
    # Remove markdown code fences, keeping only the fenced content.
    cleaned = re.sub(r"```(?:json)?\n?(.*?)```", r"\1", json_str, flags=re.DOTALL)
    # Fall back to the original input when stripping produced an empty result.
    # (The previous version overwrote the input before this check, which made
    # the fallback branch a no-op: `json_str = json_str`.)
    if not cleaned.strip():
        cleaned = json_str
    # Remove any leading/trailing whitespace
    return cleaned.strip()
async def convert_to_model(input_text: str, target_model: Type[T]) -> str:
    """
    Convert input text into a specified Pydantic model using an Agno agent.

    Args:
        input_text (str): The input text to convert
        target_model (Type[T]): The target Pydantic model class

    Returns:
        str: A JSON string that matches the model schema

    Raises:
        ValueError: If the agent call fails, or if the agent's reply is not
            valid JSON.
    """
    logger.info(
        f"Converting input text to model: {target_model.__name__} : {input_text}"
    )
    # One-off extraction agent: its only job is to emit schema-conformant JSON.
    structured_output_agent = Agent(
        model=model,
        description=(
            "You are an expert at extracting structured travel planning information from unstructured, free-form user inputs. "
            "Given a detailed user message, travel description, or conversation, your goal is to accurately populate a predefined trip schema. "
        ),
        instructions=[
            "Your task is to convert the input text into a valid JSON that matches the model schema exactly.",
            "You must return ONLY the JSON object that matches the schema exactly - no other output.",
            "When formatting text fields, you must:",
            "- Use minimal, consistent formatting throughout",
            "- Apply appropriate list formatting",
            "- Format dates, times and structured data consistently",
            "- Structure text concisely and clearly",
        ],
        markdown=True,
        expected_output="""
A valid JSON object that matches the provided schema.
Text fields should be clean and consistently formatted.
Do not include any explanations or additional text - return only the JSON object.
Without ```json or ```
""",
    )
    # Embed the target model's JSON schema in the prompt so the agent knows
    # the exact structure, field types, and constraints to produce.
    schema = target_model.model_json_schema()
    schema_str = json.dumps(schema, indent=2)
    # Create the prompt with model schema and clear instructions
    prompt = f"""
Your task is to convert the input text into a valid JSON object that exactly matches the provided schema.
Do not include any explanations or additional text - return only the JSON object.
Model schema:
{schema_str}
Rules:
- Output must be valid JSON
- All required fields must be included
- Field types must match schema exactly
- No extra fields allowed
- Validate all constraints (min/max values, regex patterns, etc)
Text Formatting Requirements:
- Use consistent, clean text formatting throughout all string fields
- For list items, use bullet points (•) instead of asterisks (*)
- Minimize indentation and whitespace in text fields
- Use line breaks sparingly and consistently
- Avoid formatting characters like asterisks (*) in text
- Don't include unnecessary prefixes or labels in text content
- Format times, dates, durations, and prices consistently
- Make sure all fields contain data appropriate for their purpose
Input text to convert:
{input_text}
"""
    # Run the agent. Only the agent call itself is inside this handler, so a
    # downstream JSON error is no longer re-caught and re-wrapped here (the
    # old code swallowed the specific "Invalid JSON response" ValueError and
    # replaced it with a generic message, logging the failure twice).
    try:
        response = await structured_output_agent.arun(prompt)
    except Exception as e:
        logger.error(f"Failed to parse response into {target_model.__name__}: {str(e)}")
        raise ValueError(
            f"Failed to parse response into {target_model.__name__}: {str(e)}"
        )
    json_string = clean_json_string(response.content)
    logger.info(f"Structured output agent response: {json_string}")
    # Validate (but do not transform) the JSON before returning it.
    try:
        json.loads(json_string)
    except json.JSONDecodeError as json_err:
        logger.error(f"JSON parsing error: {str(json_err)}")
        raise ValueError(f"Invalid JSON response: {str(json_err)}")
    return json_string
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/itinerary.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/itinerary.py | from agno.agent import Agent
from agno.tools.exa import ExaTools
from agno.tools.firecrawl import FirecrawlTools
from agno.tools.reasoning import ReasoningTools
from config.llm import model
from typing import Optional
from datetime import datetime, timedelta
from textwrap import dedent
itinerary_agent = Agent(
name="Itinerary Specialist",
model=model,
tools=[
ExaTools(num_results=8),
FirecrawlTools(formats=["markdown"]),
ReasoningTools(add_instructions=True),
],
markdown=True,
description=dedent(
"""\
You are a master itinerary creator with expertise in crafting detailed, perfectly-timed daily travel plans.
You turn abstract travel details into structured, hour-by-hour plans that maximize enjoyment while maintaining
a realistic pace. You're skilled at adapting schedules to match traveler preferences, weather conditions,
opening hours, and local customs. Your itineraries are practical, thoroughly researched, and full of
insider timing tips that make travel smooth and stress-free."""
),
instructions=[
"1. Create perfectly balanced day-by-day itineraries with meticulous timing:",
" - Structure each day into morning, afternoon, and evening blocks",
" - Include exact timing for each activity (start/end times)",
" - Account for realistic travel times between locations",
" - Balance sightseeing with leisure and rest periods",
" - Adapt pace to match traveler preferences (relaxed, moderate, fast)",
"",
"2. Ensure practical logistics in all schedules:",
" - Verify operating hours for all attractions, restaurants, and services",
" - Account for common delays (security lines, crowds, traffic)",
" - Include buffer time between activities",
" - Check for day-specific closures (weekends, holidays, seasonal)",
" - Consider local transportation options and schedules",
"",
"3. Optimize activity timing with expert knowledge:",
" - Schedule visits during off-peak hours when possible",
" - Plan indoor activities during likely rainy/hot periods",
" - Arrange sunrise/sunset experiences at optimal times",
" - Schedule meals during traditional local dining hours",
" - Time activities to avoid rush hour transportation",
"",
"4. Create custom scheduling for specific traveler types:",
" - Families: Include kid-friendly breaks and early dinners",
" - Seniors: More relaxed pace with ample rest periods",
" - Young adults: Later start times and evening activities",
" - Luxury travelers: Timing for exclusive experiences",
" - Business travelers: Efficient scheduling around work commitments",
"",
"5. Enhance itineraries with practical timing details:",
" - Best arrival times to avoid lines at attractions",
" - Photography timing for optimal lighting",
" - Meal reservations timed around activities",
" - Shopping hours for local markets and stores",
" - Weather-dependent backup plans",
"",
"6. Research tools usage for accurate scheduling:",
" - Use Exa to research location-specific timing information",
" - Employ FirecrawlTools for current operating hours and conditions",
" - Use ReasoningTools to optimize activity sequence and timing",
"",
"7. Format day plans with maximum clarity:",
" - Use clear time blocks (8:00 AM - 9:30 AM)",
" - Include travel method and duration between locations",
" - Highlight reservation times and booking requirements",
" - Note required advance arrival times (security, check-in)",
" - Use emojis for better visual organization",
],
expected_output=dedent(
"""\
# Detailed Itinerary: {Destination} ({Start Date} - {End Date})
## Trip Overview
- **Dates**: {exact dates with day count}
- **Travelers**: {number and type}
- **Pace**: {relaxed/moderate/fast}
- **Style**: {luxury/mid-range/budget}
- **Priorities**: {key interests and goals}
## Day 1: {Day of Week}, {Date}
### Morning
- **7:00 AM - 8:00 AM**: Breakfast at {location}
- **8:30 AM - 10:30 AM**: {Activity} at {location}
* Notes: {special instructions, timing tips}
* Travel: {transport method, duration}
- **11:00 AM - 12:30 PM**: {Activity} at {location}
* Notes: {special instructions, timing tips}
* Travel: {transport method, duration}
### Afternoon
- **1:00 PM - 2:00 PM**: Lunch at {location}
- **2:30 PM - 4:30 PM**: {Activity} at {location}
* Notes: {special instructions, timing tips}
* Travel: {transport method, duration}
- **5:00 PM - 6:00 PM**: Rest/refresh at hotel
### Evening
- **7:00 PM - 8:30 PM**: Dinner at {location}
- **9:00 PM - 10:30 PM**: {Activity} at {location}
* Notes: {special instructions, timing tips}
* Travel: {transport method, duration}
## Day 2: {Day of Week}, {Date}
[Similar detailed breakdown]
[Continue for each day of the trip]
## Practical Notes
- **Weather Considerations**: {weather-related timing adjustments}
- **Transportation Tips**: {local transport timing advice}
- **Reservation Reminders**: {all pre-booked times}
- **Backup Plans**: {alternative schedules for weather/closures}
"""
),
add_datetime_to_instructions=True,
show_tool_calls=True,
retries=2,
delay_between_retries=2,
exponential_backoff=True,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/plan_task.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/plan_task.py | from datetime import datetime, timezone
from enum import Enum
from typing import Optional
from sqlalchemy import String, DateTime, Enum as SQLEnum, JSON
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
class TaskStatus(str, Enum):
    """Lifecycle states a plan task moves through."""

    queued = "queued"
    in_progress = "in_progress"
    success = "success"
    error = "error"

    @classmethod
    def _missing_(cls, value):
        """Resolve value lookups case-insensitively (e.g. "QUEUED" -> queued)."""
        wanted = value.lower()
        for candidate in cls:
            if candidate.value.lower() == wanted:
                return candidate
        return None
# Declarative base for this module's ORM models. Note: models/trip_db.py
# declares its own separate Base, so the two modules use distinct metadata.
class Base(DeclarativeBase):
    pass
class PlanTask(Base):
    """Model for tracking plan tasks and their states."""

    __tablename__ = "plan_tasks"

    id: Mapped[int] = mapped_column(primary_key=True)
    # Owning trip plan; indexed for the per-trip listing query in the repository.
    trip_plan_id: Mapped[str] = mapped_column(String(50), index=True)
    # Free-form discriminator naming which kind of work this row tracks.
    task_type: Mapped[str] = mapped_column(String(50))
    # Stored via a named DB enum type built from TaskStatus.
    status: Mapped[TaskStatus] = mapped_column(
        SQLEnum(TaskStatus, name="plan_task_status")
    )
    # Arbitrary JSON payloads: the task's input and its (optional) result.
    input_data: Mapped[dict] = mapped_column(JSON)
    output_data: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
    error_message: Mapped[Optional[str]] = mapped_column(String(500), nullable=True)
    # Timezone-aware UTC timestamps; updated_at also refreshes on every UPDATE.
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
    )
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/flight.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/flight.py | from pydantic import BaseModel, Field
from typing import List, Optional
# One flight option as extracted from search results. Price, times, and
# duration are kept as free-form display strings, not parsed values.
# (Comments, not docstrings: a docstring would leak into the JSON schema.)
class FlightResult(BaseModel):
    flight_number: str = Field(description="The flight number of the flight")
    price: str = Field(description="The price of the flight")
    airline: str = Field(description="The airline of the flight")
    departure_time: str = Field(description="The departure time of the flight")
    arrival_time: str = Field(description="The arrival time of the flight")
    duration: str = Field(description="The duration of the flight")
    stops: int = Field(description="The number of stops of the flight")
# Container model: a batch of flight options returned together.
class FlightResults(BaseModel):
    flights: List[FlightResult] = Field(description="The list of flights")
# Parameters for a flight search. Dates are strings; expected format is not
# enforced here -- presumably the same 'YYYY-MM-DD' style as the hotel
# request (TODO confirm with the caller).
class FlightSearchRequest(BaseModel):
    departure: str = Field(description="The departure airport")
    destination: str = Field(description="The destination airport")
    date: str = Field(description="The date of the flight")
    # One-way searches have no return leg. Without an explicit default,
    # pydantic v2 treats an Optional-annotated field as *required*; adding
    # default=None makes the field genuinely optional (backward compatible).
    return_date: Optional[str] = Field(
        default=None, description="The return date of the flight"
    )
    adults: int = Field(description="The number of adults")
    children: int = Field(description="The number of children")
    cabin_class: str = Field(description="The cabin class")
    sort: str = Field(description="The sort order")
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/trip_db.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/trip_db.py | from sqlalchemy import Column, String, TIMESTAMP, ForeignKey, Text, DateTime
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime, timezone
from typing import Optional
from cuid2 import Cuid
CUID_GENERATOR: Cuid = Cuid()
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
# Declarative base for this module's ORM models (distinct from the Base in
# models/plan_task.py, so the two registries do not share metadata).
class Base(DeclarativeBase):
    pass
class TripPlan(Base):
    # Minimal stand-in mapping for the externally managed trip_plan table;
    # only the primary key is mapped so FK/relationship references resolve.
    __tablename__ = (
        "trip_plan"  # Assuming this table exists as per foreign key constraints
    )

    # CUID primary key; wrapped in str() here, unlike the other models --
    # harmless if generate() already returns str (TODO confirm).
    id = Column(
        String, primary_key=True, default=lambda: str(CUID_GENERATOR.generate())
    )
    # Add other fields for TripPlan if needed for standalone model definition
    # For this task, we only need it to satisfy relationship constraints if defined from this end.
class TripPlanStatus(Base):
    """Model for tracking trip plan status."""

    __tablename__ = "trip_plan_status"

    # camelCase column names suggest the table is shared with a JS/Prisma-style
    # schema -- NOTE(review): confirm against the owning migrations.
    id: Mapped[str] = mapped_column(
        Text, primary_key=True, default=lambda: CUID_GENERATOR.generate()
    )
    tripPlanId: Mapped[str] = mapped_column(Text, index=True)
    # Free-form lifecycle fields: overall status (defaults to "pending"),
    # the step currently executing, and any error text.
    status: Mapped[str] = mapped_column(Text, default="pending")
    currentStep: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    error: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    startedAt: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=False), nullable=True
    )
    completedAt: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=False), nullable=True
    )
    # Naive-UTC timestamps: generated in UTC, then stripped of tzinfo to fit
    # the timezone=False columns.
    createdAt: Mapped[datetime] = mapped_column(
        DateTime(timezone=False),
        default=lambda: datetime.now(timezone.utc).replace(tzinfo=None),
    )
    updatedAt: Mapped[datetime] = mapped_column(
        DateTime(timezone=False),
        default=lambda: datetime.now(timezone.utc).replace(tzinfo=None),
        onupdate=lambda: datetime.now(timezone.utc).replace(tzinfo=None),
    )

    # Relationship (optional, but good practice)
    # trip_plan = relationship("TripPlan") # Define TripPlan model if you want to use this relationship
class TripPlanOutput(Base):
    """Model for storing trip plan output."""

    __tablename__ = "trip_plan_output"

    id: Mapped[str] = mapped_column(
        Text, primary_key=True, default=lambda: CUID_GENERATOR.generate()
    )
    tripPlanId: Mapped[str] = mapped_column(Text, index=True)
    # Final generated itinerary text plus an optional short summary.
    itinerary: Mapped[str] = mapped_column(Text)
    summary: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    # Naive-UTC timestamps, matching TripPlanStatus (timezone=False columns).
    createdAt: Mapped[datetime] = mapped_column(
        DateTime(timezone=False),
        default=lambda: datetime.now(timezone.utc).replace(tzinfo=None),
    )
    updatedAt: Mapped[datetime] = mapped_column(
        DateTime(timezone=False),
        default=lambda: datetime.now(timezone.utc).replace(tzinfo=None),
        onupdate=lambda: datetime.now(timezone.utc).replace(tzinfo=None),
    )

    # Relationship (optional)
    # trip_plan = relationship("TripPlan") # Define TripPlan model
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/hotel.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/hotel.py | from pydantic import BaseModel, Field
from typing import List
# One hotel option as extracted from search results. Price and rating are
# kept as free-form display strings, not parsed values.
# (Comments, not docstrings: a docstring would leak into the JSON schema.)
class HotelResult(BaseModel):
    hotel_name: str = Field(description="The name of the hotel")
    price: str = Field(description="The price of the hotel")
    rating: str = Field(description="The rating of the hotel")
    address: str = Field(description="The address of the hotel")
    amenities: List[str] = Field(description="The amenities of the hotel")
    description: str = Field(description="The description of the hotel")
    url: str = Field(description="The url of the hotel")
# Container model: a batch of hotel options returned together.
class HotelResults(BaseModel):
    hotels: List[HotelResult] = Field(description="The list of hotels")
# Parameters for a hotel search; every field is required.
class HotelSearchRequest(BaseModel):
    destination: str = Field(description="The destination city or area")
    # Dates are plain ISO strings; no range validation is applied here.
    check_in: str = Field(description="The date of check-in in the format 'YYYY-MM-DD'")
    check_out: str = Field(description="The date of check-out in the format 'YYYY-MM-DD'")
    adults: int = Field(description="The number of adults")
    children: int = Field(description="The number of children")
    rooms: int = Field(description="The number of rooms")
    sort: str = Field(description="The sort order")
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/travel_plan.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/travel_plan.py | from pydantic import BaseModel, Field
from typing import List
from models.hotel import HotelResult
# Start/end of the trip; empty strings act as "not provided" defaults.
class TravelDates(BaseModel):
    start: str = ""
    end: str = ""
# Raw trip-planning form input from the frontend. Every field has a default
# so partially filled forms still validate. Pydantic deep-copies mutable
# defaults per instance, so the []/TravelDates() defaults here are safe.
class TravelPlanRequest(BaseModel):
    name: str = ""
    destination: str = ""
    starting_location: str = ""
    travel_dates: TravelDates = TravelDates()
    date_input_type: str = "picker"  # "picker" vs. free-text dates -- confirm with frontend
    duration: int = 0  # trip length in days
    traveling_with: str = ""  # e.g. solo/couple/family; values defined by the form
    adults: int = 1
    children: int = 0
    age_groups: List[str] = []
    budget: int = 75000  # total budget, expressed in budget_currency units
    budget_currency: str = "INR"
    travel_style: str = ""
    budget_flexible: bool = False
    vibes: List[str] = []
    priorities: List[str] = []
    interests: str = ""
    rooms: int = 1
    pace: List[int] = [3]  # single-element slider value; presumably a 1-5 scale -- TODO confirm
    been_there_before: str = ""
    loved_places: str = ""
    additional_info: str = ""
# Payload handed to the agent pipeline: the form data plus the ID of the
# trip-plan record the results should be attached to.
class TravelPlanAgentRequest(BaseModel):
    trip_plan_id: str
    travel_plan: TravelPlanRequest
# API acknowledgement for a plan request; the plan itself is produced
# asynchronously and retrieved later via trip_plan_id.
class TravelPlanResponse(BaseModel):
    success: bool
    message: str
    trip_plan_id: str
# One day of the itinerary, split into morning/afternoon/evening narratives.
class DayByDayPlan(BaseModel):
    day: int = Field(
        default=0, description="The day number in the itinerary, starting from 0"
    )
    date: str = Field(
        default="", description="The date for this day in YYYY-MM-DD format"
    )
    morning: str = Field(
        default="", description="Description of morning activities and plans"
    )
    afternoon: str = Field(
        default="", description="Description of afternoon activities and plans"
    )
    evening: str = Field(
        default="", description="Description of evening activities and plans"
    )
    notes: str = Field(
        default="",
        description="Additional tips, reminders or important information for the day",
    )
# A recommended attraction: just a name plus a prose description.
class Attraction(BaseModel):
    name: str = Field(default="", description="Name of the attraction")
    description: str = Field(
        default="", description="Detailed description of the attraction"
    )
# Flight entry in the final team response. Distinct from models.flight.
# FlightResult: here every field is defaulted and a booking url is included.
class FlightResult(BaseModel):
    duration: str = Field(default="", description="Duration of the flight")
    price: str = Field(
        default="", description="Price of the flight in the local currency"
    )
    departure_time: str = Field(default="", description="Departure time of the flight")
    arrival_time: str = Field(default="", description="Arrival time of the flight")
    airline: str = Field(default="", description="Airline of the flight")
    flight_number: str = Field(default="", description="Flight number of the flight")
    url: str = Field(default="", description="Website or booking URL for the flight")
    stops: int = Field(default=0, description="Number of stops in the flight")
# A recommended restaurant, all fields optional display strings.
class RestaurantResult(BaseModel):
    name: str = Field(default="", description="Name of the restaurant")
    description: str = Field(default="", description="Description of the restaurant")
    location: str = Field(default="", description="Location of the restaurant")
    url: str = Field(
        default="", description="Website or booking URL for the restaurant"
    )
# Aggregate structured output of the planning team: the daily schedule plus
# the supporting hotel/flight/dining/attraction picks and advice lists.
# All list fields are required (no defaults).
class TravelPlanTeamResponse(BaseModel):
    day_by_day_plan: List[DayByDayPlan] = Field(
        description="A list of day-by-day plans for the trip"
    )
    hotels: List[HotelResult] = Field(description="A list of hotels for the trip")
    attractions: List[Attraction] = Field(
        description="A list of recommended attractions for the trip"
    )
    flights: List[FlightResult] = Field(description="A list of flights for the trip")
    restaurants: List[RestaurantResult] = Field(
        description="A list of recommended restaurants for the trip"
    )
    budget_insights: List[str] = Field(
        description="A list of budget insights for the trip"
    )
    tips: List[str] = Field(
        description="A list of tips or recommendations for the trip"
    )
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/repository/plan_task_repository.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/repository/plan_task_repository.py | from datetime import datetime, timezone
from typing import Optional, List
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from models.plan_task import PlanTask, TaskStatus
from services.db_service import get_db_session
async def create_plan_task(
    trip_plan_id: str,
    task_type: str,
    input_data: dict,
    status: TaskStatus = TaskStatus.queued,
) -> PlanTask:
    """Persist a new PlanTask row and return it refreshed from the database."""
    new_task = PlanTask(
        trip_plan_id=trip_plan_id,
        task_type=task_type,
        status=status,
        input_data=input_data,
    )
    async with get_db_session() as session:
        session.add(new_task)
        await session.commit()
        # Refresh so DB-generated fields (e.g. primary key) are populated.
        await session.refresh(new_task)
        return new_task
async def update_task_status(
    task_id: int,
    status: TaskStatus,
    output_data: Optional[dict] = None,
    error_message: Optional[str] = None,
) -> Optional[PlanTask]:
    """Set a task's status (and optional output/error) in place.

    Only the arguments passed as non-None overwrite existing columns.
    Returns the refreshed row, or None when no task with *task_id* exists.
    """
    async with get_db_session() as session:
        lookup = await session.execute(select(PlanTask).where(PlanTask.id == task_id))
        record = lookup.scalar_one_or_none()
        if record is None:
            return None
        record.status = status
        if output_data is not None:
            record.output_data = output_data
        if error_message is not None:
            record.error_message = error_message
        record.updated_at = datetime.now(timezone.utc)
        await session.commit()
        await session.refresh(record)
        return record
async def get_task_by_id(task_id: int) -> Optional[PlanTask]:
    """Fetch a single PlanTask by primary key, or None if it does not exist."""
    stmt = select(PlanTask).where(PlanTask.id == task_id)
    async with get_db_session() as session:
        found = await session.execute(stmt)
        return found.scalar_one_or_none()
async def get_tasks_by_trip_plan(trip_plan_id: str) -> List[PlanTask]:
    """Return every task row attached to the given trip plan."""
    stmt = select(PlanTask).where(PlanTask.trip_plan_id == trip_plan_id)
    async with get_db_session() as session:
        found = await session.execute(stmt)
        return list(found.scalars().all())
async def get_tasks_by_status(status: TaskStatus) -> List[PlanTask]:
    """Return every task row currently in the given status."""
    stmt = select(PlanTask).where(PlanTask.status == status)
    async with get_db_session() as session:
        found = await session.execute(stmt)
        return list(found.scalars().all())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/repository/trip_plan_repository.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/repository/trip_plan_repository.py | from datetime import datetime, timezone
from typing import Optional, List
from sqlalchemy import select, delete
from sqlalchemy.ext.asyncio import AsyncSession
from models.trip_db import TripPlanStatus, TripPlanOutput
from services.db_service import get_db_session
async def create_trip_plan_status(
    trip_plan_id: str, status: str = "pending", current_step: Optional[str] = None
) -> TripPlanStatus:
    """Create a new trip plan status entry.

    Args:
        trip_plan_id: Identifier of the trip plan this status belongs to.
        status: Initial status value (defaults to "pending").
        current_step: Optional human-readable description of the current step.

    Returns:
        The persisted TripPlanStatus row, refreshed with DB-generated values.
    """
    # Fix: use naive-UTC timestamps, consistent with every other writer in
    # this module (update_trip_plan_status, create_trip_plan_output). The
    # previous datetime.now() stored server-local time, so createdAt could
    # disagree with later UTC-based updatedAt values.
    now = datetime.now(timezone.utc).replace(tzinfo=None)
    async with get_db_session() as session:
        status_entry = TripPlanStatus(
            tripPlanId=trip_plan_id,
            status=status,
            currentStep=current_step,
            createdAt=now,
            updatedAt=now,
        )
        session.add(status_entry)
        await session.commit()
        await session.refresh(status_entry)
        return status_entry
async def get_trip_plan_status(trip_plan_id: str) -> Optional[TripPlanStatus]:
    """Look up the status row for *trip_plan_id*, or None if none exists."""
    stmt = select(TripPlanStatus).where(TripPlanStatus.tripPlanId == trip_plan_id)
    async with get_db_session() as session:
        found = await session.execute(stmt)
        return found.scalar_one_or_none()
async def update_trip_plan_status(
    trip_plan_id: str,
    status: str,
    current_step: Optional[str] = None,
    error: Optional[str] = None,
    started_at: Optional[datetime] = None,
    completed_at: Optional[datetime] = None,
) -> Optional[TripPlanStatus]:
    """Update a trip plan's status row in place.

    Only arguments passed as non-None overwrite existing columns; datetimes
    are stored naive (tzinfo stripped). Returns the refreshed row, or None
    when no row exists for *trip_plan_id*.
    """
    async with get_db_session() as session:
        lookup = await session.execute(
            select(TripPlanStatus).where(TripPlanStatus.tripPlanId == trip_plan_id)
        )
        entry = lookup.scalar_one_or_none()
        if entry is None:
            return None
        entry.status = status
        if current_step is not None:
            entry.currentStep = current_step
        if error is not None:
            entry.error = error
        if started_at is not None:
            entry.startedAt = started_at.replace(tzinfo=None)
        if completed_at is not None:
            entry.completedAt = completed_at.replace(tzinfo=None)
        entry.updatedAt = datetime.now(timezone.utc).replace(tzinfo=None)
        await session.commit()
        await session.refresh(entry)
        return entry
async def create_trip_plan_output(
    trip_plan_id: str, itinerary: str, summary: Optional[str] = None
) -> TripPlanOutput:
    """Insert a new output row (itinerary plus optional summary) for a plan."""
    async with get_db_session() as session:
        row = TripPlanOutput(
            tripPlanId=trip_plan_id,
            itinerary=itinerary,
            summary=summary,
            # Naive-UTC timestamps, matching the rest of this module.
            createdAt=datetime.now(timezone.utc).replace(tzinfo=None),
            updatedAt=datetime.now(timezone.utc).replace(tzinfo=None),
        )
        session.add(row)
        await session.commit()
        await session.refresh(row)
        return row
async def get_trip_plan_output(trip_plan_id: str) -> Optional[TripPlanOutput]:
    """Look up the output row for *trip_plan_id*, or None if none exists."""
    stmt = select(TripPlanOutput).where(TripPlanOutput.tripPlanId == trip_plan_id)
    async with get_db_session() as session:
        found = await session.execute(stmt)
        return found.scalar_one_or_none()
async def update_trip_plan_output(
    trip_plan_id: str, itinerary: Optional[str] = None, summary: Optional[str] = None
) -> Optional[TripPlanOutput]:
    """Overwrite the itinerary and/or summary of an existing output row.

    Arguments left as None are untouched. Returns the refreshed row, or
    None when no output exists for *trip_plan_id*.
    """
    async with get_db_session() as session:
        lookup = await session.execute(
            select(TripPlanOutput).where(TripPlanOutput.tripPlanId == trip_plan_id)
        )
        row = lookup.scalar_one_or_none()
        if row is None:
            return None
        if itinerary is not None:
            row.itinerary = itinerary
        if summary is not None:
            row.summary = summary
        row.updatedAt = datetime.now(timezone.utc).replace(tzinfo=None)
        await session.commit()
        await session.refresh(row)
        return row
async def get_all_pending_trip_plans() -> List[TripPlanStatus]:
    """Get all trip plans with pending status.

    Thin wrapper over ``get_trip_plans_by_status`` so the query logic lives
    in one place instead of being duplicated per status value.
    """
    return await get_trip_plans_by_status("pending")
async def get_all_processing_trip_plans() -> List[TripPlanStatus]:
    """Get all trip plans with processing status.

    Thin wrapper over ``get_trip_plans_by_status`` so the query logic lives
    in one place instead of being duplicated per status value.
    """
    return await get_trip_plans_by_status("processing")
async def get_trip_plans_by_status(status: str) -> List[TripPlanStatus]:
    """Fetch every trip-plan status row whose status equals *status*."""
    stmt = select(TripPlanStatus).where(TripPlanStatus.status == status)
    async with get_db_session() as session:
        found = await session.execute(stmt)
        return list(found.scalars().all())
async def delete_trip_plan_outputs(trip_plan_id: str) -> None:
    """Remove every stored output row belonging to *trip_plan_id*."""
    stmt = delete(TripPlanOutput).where(TripPlanOutput.tripPlanId == trip_plan_id)
    async with get_db_session() as session:
        await session.execute(stmt)
        await session.commit()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/api/__init__.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/api/__init__.py | from .app import app
__all__ = ["app"] | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/api/app.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/api/app.py | from fastapi import FastAPI, APIRouter
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from datetime import datetime, timezone
from contextlib import asynccontextmanager
from services.db_service import initialize_db_pool, close_db_pool
from router.plan import router as plan_router
router = APIRouter(prefix="/api")
@router.get("/health", summary="API Health Check")
async def health_check():
    """Liveness endpoint: report service health and the current UTC time."""
    logger.debug("Health check requested")
    checked_at = datetime.now(timezone.utc).isoformat()
    return {"status": "healthy", "timestamp": checked_at}
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook.

    Everything before ``yield`` runs once at startup (opens the shared DB
    connection pool); everything after ``yield`` runs once at shutdown
    (closes the pool).
    """
    # Startup logic
    logger.info("API server started")
    # Initialize database connection pool
    logger.info("Initializing database connection pool")
    await initialize_db_pool()
    logger.info("Database connection pool initialized")
    yield
    # Shutdown logic
    # Close database connection pool
    logger.info("Closing database connection pool")
    await close_db_pool()
    logger.info("API server shutting down")
# FastAPI application; the lifespan handler manages the DB pool.
app = FastAPI(
    title="TripCraft AI API",
    description="API for running intelligent trip planning in the background",
    version="0.1.0",
    lifespan=lifespan,
)
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec (browsers refuse to send credentials to a
# wildcard origin) and is unsafe for production — confirm and switch to an
# explicit origin allowlist if credentialed requests are needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Mount the local health-check router and the travel-plan endpoints.
app.include_router(router)
app.include_router(plan_router)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/config/llm.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/config/llm.py | from agno.models.google import Gemini
from agno.models.openai import OpenAIChat
from agno.models.openrouter import OpenRouter
# Shared LLM handles for the app, all routed through OpenRouter.
# model = Gemini(id="gemini-2.0-flash-001", temperature=0.1)
# model2 = OpenAIChat(id="gpt-4o", temperature=0.1)
# Primary model: slightly creative temperature for itinerary generation.
# NOTE(review): max_tokens=8096 looks like a typo for 8192 — confirm intent.
model = OpenRouter(id="google/gemini-2.0-flash-001", temperature=0.3, max_tokens=8096)
# Secondary model (GPT-4o via OpenRouter), low temperature.
model2 = OpenRouter(id="openai/gpt-4o", temperature=0.1)
# Near-deterministic variant of the primary model (temperature 0.1).
model_zero = OpenRouter(
    id="google/gemini-2.0-flash-001", temperature=0.1, max_tokens=8096
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/config/logger.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/config/logger.py | import sys
import logging
import inspect
from typing import Dict, Any, Callable
from loguru import logger
from pathlib import Path
# Create logs directory if it doesn't exist
# LOGS_DIR = Path("logs")
# LOGS_DIR.mkdir(exist_ok=True)
def configure_logger(console_level: str = "INFO", log_format: str = None) -> None:
    """Configure the loguru logger's console (stderr) sink.

    Args:
        console_level: Minimum level for console logs.
        log_format: Optional custom format string; a colourised default is
            used when None.
            NOTE(review): annotated as ``str`` but defaults to None —
            should be ``Optional[str]`` (typing.Optional is not currently
            imported in this module).
    """
    # Remove default configuration
    logger.remove()
    # Use default format if none provided
    if log_format is None:
        log_format = "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>"
    # Add console handler
    logger.add(
        sys.stderr,
        format=log_format,
        level=console_level,
        colorize=True,
        backtrace=True,
        diagnose=True,
    )
    # # Add file handler
    # logger.add(
    #     LOGS_DIR / "app.log",
    #     format=log_format,
    #     level=console_level,
    # )
# Intercept standard library logging to loguru
class InterceptHandler(logging.Handler):
    """Intercepts standard library logging and redirects to loguru."""

    def emit(self, record: logging.LogRecord) -> None:
        """Forward a stdlib LogRecord to loguru, preserving level and call site."""
        # Get corresponding Loguru level if it exists; unknown custom level
        # names fall back to the numeric level.
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno
        # Find caller from where originated the logged message: walk up past
        # frames inside the logging module so loguru's depth= reports the
        # real call site rather than logging internals.
        frame, depth = inspect.currentframe(), 0
        while frame and frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1
        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )
def patch_std_logging():
    """Route every standard-library logger through loguru's InterceptHandler."""
    # Install the intercept handler on the root logger, replacing defaults.
    logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True)

    # Re-point every logger that already exists in the manager registry.
    for existing_name in logging.root.manager.loggerDict:
        std_logger = logging.getLogger(existing_name)
        std_logger.handlers = [InterceptHandler()]
        std_logger.propagate = False

    # Well-known web-framework loggers get patched explicitly as well.
    for framework_name in ("uvicorn", "uvicorn.error", "uvicorn.access", "fastapi"):
        std_logger = logging.getLogger(framework_name)
        std_logger.handlers = [InterceptHandler()]
def setup_logging(console_level: str = "INFO", intercept_stdlib: bool = True) -> None:
    """Setup logging for the entire application.

    Args:
        console_level: Minimum level for console output.
        intercept_stdlib: Whether to patch standard library logging so it
            flows through loguru.
    """
    # Configure loguru
    configure_logger(console_level=console_level)
    # Optionally patch standard library logging
    if intercept_stdlib:
        patch_std_logging()
    # Add extra context to logger
    # NOTE(review): "decipher-research-agent" looks copy-pasted from another
    # project — confirm the intended app name for this service.
    logger.configure(extra={"app_name": "decipher-research-agent"})
    logger.info("Logging configured successfully")
def logger_hook(function_name: str, function_call: Callable, arguments: Dict[str, Any]):
    """Tool-execution hook: log arguments and result around a tool call.

    Args:
        function_name: Name of the tool function being invoked (for logs).
        function_call: The callable to execute.
        arguments: Keyword arguments forwarded to ``function_call``.

    Returns:
        Whatever ``function_call(**arguments)`` returns.
    """
    logger.info(f"About to call {function_name} with arguments: {arguments}")
    result = function_call(**arguments)
    logger.info(f"Function call completed with result: {result}")
    return result
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/router/plan.py | advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/router/plan.py | import asyncio
from fastapi import APIRouter, HTTPException, status
from loguru import logger
from models.travel_plan import TravelPlanAgentRequest, TravelPlanResponse
from models.plan_task import TaskStatus
from services.plan_service import generate_travel_plan
from repository.plan_task_repository import create_plan_task, update_task_status
from typing import List
router = APIRouter(prefix="/api/plan", tags=["Travel Plan"])
# Strong references to in-flight background tasks. asyncio.create_task keeps
# only a weak reference to its task, so without this set a running task could
# be garbage-collected mid-flight (documented pitfall in the asyncio docs).
_background_tasks: set = set()


@router.post(
    "/trigger",
    response_model=TravelPlanResponse,
    summary="Trigger Trip Craft Agent",
    description="Triggers the travel plan agent with the provided travel details",
)
async def trigger_trip_craft_agent(
    request: TravelPlanAgentRequest,
) -> TravelPlanResponse:
    """
    Trigger the trip craft agent to create a personalized travel itinerary.

    Plan generation runs as a fire-and-forget asyncio task; this endpoint
    only records the task and returns immediately.

    Args:
        request: Travel plan request containing trip details and plan ID

    Returns:
        TravelPlanResponse: Success status and trip plan ID

    Raises:
        HTTPException: 500 when the task record cannot be created/scheduled.
    """
    try:
        logger.info(f"Triggering travel plan agent for trip ID: {request.trip_plan_id}")
        logger.info(f"Travel plan details: {request.travel_plan}")

        # Create the initial task record (status defaults to queued).
        task = await create_plan_task(
            trip_plan_id=request.trip_plan_id,
            task_type="travel_plan_generation",
            input_data=request.travel_plan.model_dump(),
        )
        logger.info(f"Task created: {task.id}")

        # Background coroutine that mirrors its progress into the task row.
        async def generate_plan_with_tracking():
            try:
                # Mark in-progress as soon as the service starts working.
                await update_task_status(task.id, TaskStatus.in_progress)
                logger.info(f"Task updated to in progress: {task.id}")

                result = await generate_travel_plan(request)

                await update_task_status(
                    task.id, TaskStatus.success, output_data={"travel_plan": result}
                )
                logger.info(f"Task updated to success: {task.id}")
            except Exception as e:
                logger.error(f"Error generating travel plan: {str(e)}")
                await update_task_status(
                    task.id, TaskStatus.error, error_message=str(e)
                )
                logger.info(f"Task updated to error: {task.id}")
                # Deliberately no re-raise: nothing ever awaits this task, so
                # re-raising would only produce an "exception was never
                # retrieved" warning. The failure is already persisted above.

        # Fix: keep a strong reference to the task (create_task holds only a
        # weak one) and drop it automatically when the task completes.
        bg_task = asyncio.create_task(generate_plan_with_tracking())
        _background_tasks.add(bg_task)
        bg_task.add_done_callback(_background_tasks.discard)

        logger.info(
            f"Travel plan agent triggered successfully for trip ID: {request.trip_plan_id}"
        )
        return TravelPlanResponse(
            success=True,
            message="Travel plan agent triggered successfully",
            trip_plan_id=request.trip_plan_id,
        )
    except Exception as e:
        logger.error(f"Error triggering travel plan agent: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to trigger travel plan agent: {str(e)}",
        )
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/multi_agent_researcher/research_agent_llama3.py | advanced_ai_agents/multi_agent_apps/multi_agent_researcher/research_agent_llama3.py | # Import the required libraries
import streamlit as st
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.team import Team
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.tools.hackernews import HackerNewsTools
from agno.tools.newspaper4k import Newspaper4kTools
from agno.models.ollama import Ollama
# Set up the Streamlit app
st.title("Multi-Agent AI Researcher using Llama-3 🔍🤖")
st.caption("This app allows you to research top stories and users on HackerNews and write blogs, reports and social posts.")

# Create the specialized agents
# Researcher: fetches top stories via the HackerNews tools.
hn_researcher = Agent(
    name="HackerNews Researcher",
    model=Ollama(id="llama3.2", max_tokens=1024),
    role="Gets top stories from hackernews.",
    tools=[HackerNewsTools()],
)

# Searcher: supplements each story with general web results (DuckDuckGo).
web_searcher = Agent(
    name="Web Searcher",
    model=Ollama(id="llama3.2", max_tokens=1024),
    role="Searches the web for information on a topic",
    tools=[DuckDuckGoTools()],
    add_datetime_to_context=True,
)

# Reader: downloads and extracts full article text from story URLs.
article_reader = Agent(
    name="Article Reader",
    model=Ollama(id="llama3.2", max_tokens=1024),
    role="Reads articles from URLs.",
    tools=[Newspaper4kTools()],
)

# Coordinating team: sequences the three members per the instructions below.
hackernews_team = Team(
    name="HackerNews Team",
    model=Ollama(id="llama3.2", max_tokens=1024),
    members=[hn_researcher, web_searcher, article_reader],
    instructions=[
        "First, search hackernews for what the user is asking about.",
        "Then, ask the article reader to read the links for the stories to get more information.",
        "Important: you must provide the article reader with the links to read.",
        "Then, ask the web searcher to search for each story to get more information.",
        "Finally, provide a thoughtful and engaging summary.",
    ],
    markdown=True,
    debug_mode=True,
    show_members_responses=True,
)

# Input field for the report query
query = st.text_input("Enter your report query")

if query:
    # Get the response from the assistant (non-streaming: one RunOutput)
    response: RunOutput = hackernews_team.run(query, stream=False)
    st.write(response.content)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/multi_agent_apps/multi_agent_researcher/research_agent.py | advanced_ai_agents/multi_agent_apps/multi_agent_researcher/research_agent.py | # Import the required libraries
import streamlit as st
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.models.openai import OpenAIChat
from agno.team import Team
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.tools.hackernews import HackerNewsTools
from agno.tools.newspaper4k import Newspaper4kTools
import os
# Set up the Streamlit app
st.title("Multi-Agent AI Researcher 🔍🤖")
st.caption("This app allows you to research top stories and users on HackerNews and write blogs, reports and social posts.")

# Get OpenAI API key from user
openai_api_key = st.text_input("OpenAI API Key", type="password")

if openai_api_key:
    # Fix: export the key only once the user actually provides one. The
    # previous unconditional assignment set OPENAI_API_KEY to "" on every
    # render before input, clobbering any key already in the environment.
    os.environ["OPENAI_API_KEY"] = openai_api_key

    # Researcher: fetches top stories via the HackerNews tools.
    hn_researcher = Agent(
        name="HackerNews Researcher",
        model=OpenAIChat(id="gpt-4o-mini"),
        role="Gets top stories from hackernews.",
        tools=[HackerNewsTools()],
    )

    # Searcher: supplements each story with general web results (DuckDuckGo).
    web_searcher = Agent(
        name="Web Searcher",
        model=OpenAIChat(id="gpt-4o-mini"),
        role="Searches the web for information on a topic",
        tools=[DuckDuckGoTools()],
        add_datetime_to_context=True,
    )

    # Reader: downloads and extracts full article text from story URLs.
    article_reader = Agent(
        name="Article Reader",
        model=OpenAIChat(id="gpt-4o-mini"),
        role="Reads articles from URLs.",
        tools=[Newspaper4kTools()],
    )

    # Coordinating team that sequences the three member agents.
    hackernews_team = Team(
        name="HackerNews Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[hn_researcher, web_searcher, article_reader],
        instructions=[
            "First, search hackernews for what the user is asking about.",
            "Then, ask the article reader to read the links for the stories to get more information.",
            "Important: you must provide the article reader with the links to read.",
            "Then, ask the web searcher to search for each story to get more information.",
            "Finally, provide a thoughtful and engaging summary.",
        ],
        markdown=True,
        debug_mode=True,
        show_members_responses=True,
    )

    # Input field for the report query
    query = st.text_input("Enter your report query")

    if query:
        # Get the response from the assistant (non-streaming)
        response: RunOutput = hackernews_team.run(query, stream=False)
        st.write(response.content)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/autonomous_game_playing_agent_apps/ai_tic_tac_toe_agent/utils.py | advanced_ai_agents/autonomous_game_playing_agent_apps/ai_tic_tac_toe_agent/utils.py | from typing import List, Optional, Tuple
import streamlit as st
# Define constants for players
X_PLAYER = "X"
O_PLAYER = "O"
EMPTY = " "


class TicTacToeBoard:
    """In-memory 3x3 Tic Tac Toe board with validation and win detection.

    X always moves first; ``make_move`` switches ``current_player``
    automatically after each successful move.
    """

    def __init__(self):
        # Initialize empty 3x3 board; X moves first.
        self.board = [[EMPTY for _ in range(3)] for _ in range(3)]
        self.current_player = X_PLAYER

    def make_move(self, row: int, col: int) -> Tuple[bool, str]:
        """
        Make a move on the board for the current player.

        Args:
            row (int): Row index (0-2)
            col (int): Column index (0-2)

        Returns:
            Tuple[bool, str]: (Success status, Message with current board state or error)
        """
        # Validate move coordinates before touching the board.
        if not (0 <= row <= 2 and 0 <= col <= 2):
            return (
                False,
                "Invalid move: Position out of bounds. Please choose row and column between 0 and 2.",
            )
        # Reject moves onto an occupied cell.
        if self.board[row][col] != EMPTY:
            return False, f"Invalid move: Position ({row}, {col}) is already occupied."
        self.board[row][col] = self.current_player
        board_state = self.get_board_state()
        # Only switch turns after a successful move.
        self.current_player = O_PLAYER if self.current_player == X_PLAYER else X_PLAYER
        return True, f"Move successful!\n{board_state}"

    def get_board_state(self) -> str:
        """Return an ASCII rendering of the current board."""
        board_str = "\n-------------\n"
        for row in self.board:
            board_str += f"| {' | '.join(row)} |\n-------------\n"
        return board_str

    def check_winner(self) -> Optional[str]:
        """
        Check if there's a winner.

        Returns:
            Optional[str]: The winning player (X or O) or None if no winner
        """
        # Collect all eight winning lines, then scan them uniformly.
        lines = [list(row) for row in self.board]  # rows
        lines += [[self.board[r][c] for r in range(3)] for c in range(3)]  # columns
        lines.append([self.board[i][i] for i in range(3)])  # main diagonal
        lines.append([self.board[i][2 - i] for i in range(3)])  # anti-diagonal
        for line in lines:
            if line[0] != EMPTY and line.count(line[0]) == 3:
                return line[0]
        return None

    def is_board_full(self) -> bool:
        """Return True when every cell is occupied (draw condition)."""
        return all(cell != EMPTY for row in self.board for cell in row)

    def get_valid_moves(self) -> List[Tuple[int, int]]:
        """
        Get a list of valid moves (empty positions).

        Returns:
            List[Tuple[int, int]]: List of (row, col) tuples representing valid moves
        """
        # Idiomatic comprehension instead of the nested append loop.
        return [
            (row, col)
            for row in range(3)
            for col in range(3)
            if self.board[row][col] == EMPTY
        ]

    def get_game_state(self) -> Tuple[bool, str]:
        """
        Get the current game state.

        Returns:
            Tuple[bool, str]: (is_game_over, status_message)
        """
        winner = self.check_winner()
        if winner:
            return True, f"Player {winner} wins!"
        if self.is_board_full():
            return True, "It's a draw!"
        return False, "Game in progress"
def display_board(board: TicTacToeBoard):
    """Render the 3x3 board as an HTML grid via Streamlit markdown."""
    cells = "".join(
        f'<div class="board-cell">{cell}</div>'
        for row in board.board
        for cell in row
    )
    st.markdown(f'<div class="game-board">{cells}</div>', unsafe_allow_html=True)
def show_agent_status(agent_name: str, status: str):
    """Render a status banner for the named agent via Streamlit markdown."""
    banner = f"""<div class="agent-status">
            🤖 <b>{agent_name}</b>: {status}
        </div>"""
    st.markdown(banner, unsafe_allow_html=True)
def create_mini_board_html(
    board_state: list,
    highlight_pos: Optional[Tuple[int, int]] = None,
    is_player1: bool = True,
) -> str:
    """Render a 3x3 board snapshot as a mini HTML grid.

    Args:
        board_state: 3x3 nested list of cell symbols ("X", "O" or " ").
        highlight_pos: Optional (row, col) of the single cell to highlight.
        is_player1: Selects the player-specific highlight colour class.

    Returns:
        HTML string for a ``mini-board`` div containing nine cells.
    """
    # Fix: annotation was `tuple = None`; Optional[Tuple[int, int]] matches
    # the actual contract (Optional/Tuple already imported in this module).
    player_class = f"player{1 if is_player1 else 2}"
    cells = []
    for i in range(3):
        for j in range(3):
            # Only the highlighted cell (if any) carries the extra classes;
            # `highlight_pos == (i, j)` is False whenever highlight_pos is None.
            highlight = f"highlight {player_class}" if highlight_pos == (i, j) else ""
            cells.append(f'<div class="mini-cell {highlight}">{board_state[i][j]}</div>')
    return '<div class="mini-board">' + "".join(cells) + "</div>"
def display_move_history():
    """Display the move history with mini boards in two columns.

    Reads ``st.session_state.move_history`` (a list of dicts with keys
    "move" as "row,col", "player", and "number" — assumed shape, set by the
    game loop; TODO confirm against caller) and renders Player 1 moves in
    the left column and Player 2 moves in the right.
    """
    st.markdown(
        '<h3 style="margin-bottom: 30px;">📜 Game History</h3>',
        unsafe_allow_html=True,
    )
    history_container = st.empty()
    if "move_history" in st.session_state and st.session_state.move_history:
        # Split moves into player 1 and player 2 moves
        p1_moves = []
        p2_moves = []
        current_board = [[" " for _ in range(3)] for _ in range(3)]
        # Process all moves first, replaying them onto a scratch board so
        # each history entry shows the cumulative position at that move.
        for move in st.session_state.move_history:
            row, col = map(int, move["move"].split(","))
            is_player1 = "Player 1" in move["player"]
            symbol = "X" if is_player1 else "O"
            current_board[row][col] = symbol
            # Snapshot the board; later moves must not mutate this entry.
            board_copy = [row[:] for row in current_board]
            move_html = f"""<div class="move-entry player{1 if is_player1 else 2}">
                {create_mini_board_html(board_copy, (row, col), is_player1)}
                <div class="move-info">
                    <div class="move-number player{1 if is_player1 else 2}">Move #{move["number"]}</div>
                    <div>{move["player"]}</div>
                    <div style="font-size: 0.9em; color: #888">Position: ({row}, {col})</div>
                </div>
            </div>"""
            if is_player1:
                p1_moves.append(move_html)
            else:
                p2_moves.append(move_html)
        max_moves = max(len(p1_moves), len(p2_moves))
        history_content = '<div class="history-grid">'
        # Left column (Player 1)
        history_content += '<div class="history-column-left">'
        for i in range(max_moves):
            entry_html = ""
            # Player 1 move
            if i < len(p1_moves):
                entry_html += p1_moves[i]
            history_content += entry_html
        history_content += "</div>"
        # Right column (Player 2)
        history_content += '<div class="history-column-right">'
        for i in range(max_moves):
            entry_html = ""
            # Player 2 move
            if i < len(p2_moves):
                entry_html += p2_moves[i]
            history_content += entry_html
        history_content += "</div>"
        history_content += "</div>"
        # Display the content
        history_container.markdown(history_content, unsafe_allow_html=True)
    else:
        history_container.markdown(
            """<div style="text-align: center; color: #666; padding: 20px;">
                No moves yet. Start the game to see the history!
            </div>""",
            unsafe_allow_html=True,
        )
CUSTOM_CSS = """
<style>
/* Main Styles */
.main-title {
text-align: center;
background: linear-gradient(45deg, #FF4B2B, #FF416C);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
font-size: 3em;
font-weight: bold;
padding: 0.5em 0;
}
.subtitle {
text-align: center;
color: #666;
margin-bottom: 1em;
}
.game-board {
display: grid;
grid-template-columns: repeat(3, 80px);
gap: 5px;
justify-content: center;
margin: 1em auto;
background: #666;
padding: 5px;
border-radius: 8px;
width: fit-content;
}
.board-cell {
width: 80px;
height: 80px;
display: flex;
align-items: center;
justify-content: center;
font-size: 2em;
font-weight: bold;
background-color: #2b2b2b;
color: #fff;
transition: all 0.3s ease;
margin: 0;
padding: 0;
}
.board-cell:hover {
background-color: #3b3b3b;
}
.agent-status {
background-color: #1e1e1e;
border-left: 4px solid #4CAF50;
padding: 10px;
margin: 10px auto;
border-radius: 4px;
max-width: 600px;
text-align: center;
}
.agent-thinking {
display: flex;
justify-content: center;
background-color: #2b2b2b;
padding: 10px;
border-radius: 5px;
margin: 10px auto;
border-left: 4px solid #FFA500;
max-width: 600px;
}
.move-history {
background-color: #2b2b2b;
padding: 15px;
border-radius: 10px;
margin: 10px 0;
}
.thinking-container {
position: fixed;
bottom: 20px;
left: 50%;
z-index: 1000;
min-width: 300px;
}
.agent-thinking {
background-color: rgba(43, 43, 43, 0.95);
border: 1px solid #4CAF50;
box-shadow: 0 2px 10px rgba(0,0,0,0.3);
}
/* Move History Updates */
.history-header {
text-align: center;
margin-bottom: 30px;
}
.history-grid {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 20px; /* Controls spacing between columns */
width: 100%;
margin: 0; /* Remove left/right margins */
padding: 0;
}
.history-column-left,
.history-column-right {
display: flex;
flex-direction: column;
align-items: flex-start; /* Ensures columns fill available space nicely */
margin: 0;
padding: 0;
width: 100%;
}
.move-entry {
display: flex;
align-items: center;
padding: 12px;
margin: 8px 0;
background-color: #2b2b2b;
border-radius: 4px;
width: 100%; /* Removed fixed width so entries span the column */
box-sizing: border-box;
}
.move-entry.player1 {
border-left: 4px solid #4CAF50;
}
.move-entry.player2 {
border-left: 4px solid #f44336;
}
/* Mini-board styling inside moves */
.mini-board {
display: grid;
grid-template-columns: repeat(3, 25px);
gap: 2px;
background: #444;
padding: 2px;
border-radius: 4px;
margin-right: 15px;
}
.mini-cell {
width: 25px;
height: 25px;
display: flex;
align-items: center;
justify-content: center;
font-size: 14px;
font-weight: bold;
background-color: #2b2b2b;
color: #fff;
}
.mini-cell.highlight.player1 {
background-color: #4CAF50;
color: white;
}
.mini-cell.highlight.player2 {
background-color: #f44336;
color: white;
}
/* Move info styling */
.move-info {
flex-grow: 1;
padding-left: 12px;
}
.move-number {
font-weight: bold;
margin-right: 10px;
}
.move-number.player1 {
color: #4CAF50;
}
.move-number.player2 {
color: #f44336;
}
</style>
"""
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/autonomous_game_playing_agent_apps/ai_tic_tac_toe_agent/agents.py | advanced_ai_agents/autonomous_game_playing_agent_apps/ai_tic_tac_toe_agent/agents.py | """
Tic Tac Toe Battle
---------------------------------
This example shows how to build a Tic Tac Toe game where two AI agents play against each other.
The game features a referee agent coordinating between two player agents using different
language models.
Usage Examples:
---------------
1. Quick game with default settings:
referee_agent = get_tic_tac_toe_referee()
play_tic_tac_toe()
2. Game with debug mode off:
referee_agent = get_tic_tac_toe_referee(debug_mode=False)
play_tic_tac_toe(debug_mode=False)
The game integrates:
- Multiple AI models (Claude, GPT-4, etc.)
- Turn-based gameplay coordination
- Move validation and game state management
"""
import sys
from pathlib import Path
from textwrap import dedent
from typing import Tuple
from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.models.google import Gemini
from agno.models.groq import Groq
from agno.models.openai import OpenAIChat
project_root = str(Path(__file__).parent.parent.parent.parent)
if project_root not in sys.path:
sys.path.append(project_root)
def get_model_for_provider(provider: str, model_name: str):
    """Build a model instance for the given provider/model pair.

    Args:
        provider: The model provider (e.g., 'openai', 'google', 'anthropic', 'groq')
        model_name: The specific model name/ID

    Returns:
        An instance of the appropriate model class

    Raises:
        ValueError: If the provider is not supported
    """
    if provider == "openai":
        return OpenAIChat(id=model_name)
    if provider == "google":
        return Gemini(id=model_name)
    if provider == "groq":
        return Groq(id=model_name)
    if provider == "anthropic":
        # Short aliases map to pinned Anthropic model IDs with a larger output budget.
        if model_name == "claude-3-5-sonnet":
            return Claude(id="claude-3-5-sonnet-20241022", max_tokens=8192)
        if model_name == "claude-3-7-sonnet":
            return Claude(
                id="claude-3-7-sonnet-20250219",
                max_tokens=8192,
            )
        if model_name == "claude-3-7-sonnet-thinking":
            # Same pinned model, but with extended thinking enabled.
            return Claude(
                id="claude-3-7-sonnet-20250219",
                max_tokens=8192,
                thinking={"type": "enabled", "budget_tokens": 4096},
            )
        # Any other Anthropic model name is passed through unchanged.
        return Claude(id=model_name)
    raise ValueError(f"Unsupported model provider: {provider}")
def get_tic_tac_toe_players(
    model_x: str = "openai:gpt-4o",
    model_o: str = "openai:o3-mini",
    debug_mode: bool = True,
) -> Tuple[Agent, Agent]:
    """Create the two player agents for an agent-vs-agent Tic Tac Toe game.

    Args:
        model_x: "provider:model_name" spec for Player X (e.g. "openai:gpt-4o")
        model_o: "provider:model_name" spec for Player O
        debug_mode: Enable logging and debug features on both agents

    Returns:
        A (player_x, player_o) tuple of configured Agents.

    Raises:
        ValueError: If a spec has no ":" separator, or the provider is
            unsupported (propagated from get_model_for_provider).
    """

    def build_description(mark: str) -> str:
        # The two players share an identical system prompt except for their mark.
        return dedent(f"""\
            You are Player {mark} in a Tic Tac Toe game. Your goal is to win by placing three {mark}'s in a row (horizontally, vertically, or diagonally).
            BOARD LAYOUT:
            - The board is a 3x3 grid with coordinates from (0,0) to (2,2)
            - Top-left is (0,0), bottom-right is (2,2)
            RULES:
            - You can only place {mark} in empty spaces (shown as " " on the board)
            - Players take turns placing their marks
            - First to get 3 marks in a row (horizontal, vertical, or diagonal) wins
            - If all spaces are filled with no winner, the game is a draw
            YOUR RESPONSE:
            - Provide ONLY two numbers separated by a space (row column)
            - Example: "1 2" places your {mark} in row 1, column 2
            - Choose only from the valid moves list provided to you
            STRATEGY TIPS:
            - Study the board carefully and make strategic moves
            - Block your opponent's potential winning moves
            - Create opportunities for multiple winning paths
            - Pay attention to the valid moves and avoid illegal moves
        """)

    # Parse "provider:model_name" specs; maxsplit=1 tolerates model IDs
    # that themselves contain a ':' character.
    provider_x, model_name_x = model_x.split(":", 1)
    provider_o, model_name_o = model_o.split(":", 1)
    # Use distinct names instead of rebinding the str parameters to model objects.
    x_model = get_model_for_provider(provider_x, model_name_x)
    o_model = get_model_for_provider(provider_o, model_name_o)
    player_x = Agent(
        name="Player X",
        description=build_description("X"),
        model=x_model,
        debug_mode=debug_mode,
    )
    player_o = Agent(
        name="Player O",
        description=build_description("O"),
        model=o_model,
        debug_mode=debug_mode,
    )
    return player_x, player_o
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/autonomous_game_playing_agent_apps/ai_tic_tac_toe_agent/app.py | advanced_ai_agents/autonomous_game_playing_agent_apps/ai_tic_tac_toe_agent/app.py | import nest_asyncio
import streamlit as st
from dotenv import load_dotenv
import os
# Load environment variables from .env file
load_dotenv()
from agents import get_tic_tac_toe_players
from agno.run.agent import RunOutput
from agno.utils.log import logger
from utils import (
CUSTOM_CSS,
TicTacToeBoard,
display_board,
display_move_history,
show_agent_status,
)
nest_asyncio.apply()
# Page configuration
st.set_page_config(
page_title="Agent Tic Tac Toe",
page_icon="🎮",
layout="wide",
initial_sidebar_state="expanded",
)
# Load custom CSS with dark mode support
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)
def main():
    """Streamlit entry point: render the UI and advance the agent-vs-agent game.

    Streamlit re-executes this function from the top on every interaction.
    All durable game state (board, players, move history, pause flag) lives
    in st.session_state; at most one agent move is played per run, after
    which st.rerun() triggers the next pass.
    """
    ####################################################################
    # Check for required API keys
    ####################################################################
    # Maps each user-facing model label to the env var it needs at runtime.
    required_keys_info = {
        "gpt-4o": "OPENAI_API_KEY",
        "o3-mini": "OPENAI_API_KEY",
        "claude-3.5": "ANTHROPIC_API_KEY",
        "claude-3.7": "ANTHROPIC_API_KEY",
        "claude-3.7-thinking": "ANTHROPIC_API_KEY",
        "gemini-flash": "GOOGLE_API_KEY",
        "gemini-pro": "GOOGLE_API_KEY",
        "llama-3.3": "GROQ_API_KEY",
    }
    ####################################################################
    # App header
    ####################################################################
    st.markdown(
        "<h1 class='main-title'>Watch Agents play Tic Tac Toe</h1>",
        unsafe_allow_html=True,
    )
    ####################################################################
    # Initialize session state
    ####################################################################
    if "game_started" not in st.session_state:
        st.session_state.game_started = False
        st.session_state.game_paused = False
        st.session_state.move_history = []
    with st.sidebar:
        st.markdown("### Game Controls")
        # Maps the user-facing label to the "provider:model" spec for agents.py.
        model_options = {
            "gpt-4o": "openai:gpt-4o",
            "o3-mini": "openai:o3-mini",
            "claude-3.5": "anthropic:claude-3-5-sonnet",
            "claude-3.7": "anthropic:claude-3-7-sonnet",
            "claude-3.7-thinking": "anthropic:claude-3-7-sonnet-thinking",
            "gemini-flash": "google:gemini-2.0-flash",
            "gemini-pro": "google:gemini-2.0-pro-exp-02-05",
            "llama-3.3": "groq:llama-3.3-70b-versatile",
        }
        ################################################################
        # Model selection
        ################################################################
        selected_p_x = st.selectbox(
            "Select Player X",
            list(model_options.keys()),
            index=list(model_options.keys()).index("claude-3.7-thinking"),
            key="model_p1",
        )
        selected_p_o = st.selectbox(
            "Select Player O",
            list(model_options.keys()),
            index=list(model_options.keys()).index("o3-mini"),
            key="model_p2",
        )
        ################################################################
        # API Key validation
        ################################################################
        missing_keys = []
        for model in [selected_p_x, selected_p_o]:
            required_key = required_keys_info.get(model)
            if required_key and not os.getenv(required_key):
                missing_keys.append(f"**{model}** requires `{required_key}`")
        if missing_keys:
            st.error(f"""
            🔑 **Missing API Keys:**
            {chr(10).join(f"• {key}" for key in missing_keys)}
            **To fix this:**
            1. Create a `.env` file in this directory
            2. Add your API keys:
            ```
            OPENAI_API_KEY=your_key_here
            ANTHROPIC_API_KEY=your_key_here
            GOOGLE_API_KEY=your_key_here
            GROQ_API_KEY=your_key_here
            ```
            3. Restart the app
            """)
        ################################################################
        # Game controls
        ################################################################
        col1, col2 = st.columns(2)
        with col1:
            if not st.session_state.game_started:
                # Start stays disabled until every required API key is present.
                if st.button("▶️ Start Game", disabled=bool(missing_keys)):
                    st.session_state.player_x, st.session_state.player_o = (
                        get_tic_tac_toe_players(
                            model_x=model_options[selected_p_x],
                            model_o=model_options[selected_p_o],
                            debug_mode=True,
                        )
                    )
                    st.session_state.game_board = TicTacToeBoard()
                    st.session_state.game_started = True
                    st.session_state.game_paused = False
                    st.session_state.move_history = []
                    st.rerun()
            else:
                game_over, _ = st.session_state.game_board.get_game_state()
                if not game_over:
                    if st.button(
                        "⏸️ Pause" if not st.session_state.game_paused else "▶️ Resume"
                    ):
                        st.session_state.game_paused = not st.session_state.game_paused
                        st.rerun()
        with col2:
            if st.session_state.game_started:
                # New Game rebuilds both agents with the currently selected models.
                if st.button("🔄 New Game"):
                    st.session_state.player_x, st.session_state.player_o = (
                        get_tic_tac_toe_players(
                            model_x=model_options[selected_p_x],
                            model_o=model_options[selected_p_o],
                            debug_mode=True,
                        )
                    )
                    st.session_state.game_board = TicTacToeBoard()
                    st.session_state.game_paused = False
                    st.session_state.move_history = []
                    st.rerun()
    ####################################################################
    # Header showing current models
    ####################################################################
    if st.session_state.game_started:
        st.markdown(
            f"<h3 style='color:#87CEEB; text-align:center;'>{selected_p_x} vs {selected_p_o}</h3>",
            unsafe_allow_html=True,
        )
    ####################################################################
    # Main game area
    ####################################################################
    if st.session_state.game_started:
        game_over, status = st.session_state.game_board.get_game_state()
        display_board(st.session_state.game_board)
        # Show game status (winner/draw/current player)
        if game_over:
            winner_player = (
                "X" if "X wins" in status else "O" if "O wins" in status else None
            )
            if winner_player:
                winner_num = "1" if winner_player == "X" else "2"
                winner_model = selected_p_x if winner_player == "X" else selected_p_o
                st.success(f"🏆 Game Over! Player {winner_num} ({winner_model}) wins!")
            else:
                st.info("🤝 Game Over! It's a draw!")
        else:
            # Show current player status
            current_player = st.session_state.game_board.current_player
            player_num = "1" if current_player == "X" else "2"
            current_model_name = selected_p_x if current_player == "X" else selected_p_o
            show_agent_status(
                f"Player {player_num} ({current_model_name})",
                "It's your turn",
            )
        display_move_history()
        if not st.session_state.game_paused and not game_over:
            # Thinking indicator
            st.markdown(
                f"""<div class="thinking-container">
                    <div class="agent-thinking">
                        <div style="margin-right: 10px; display: inline-block;">🔄</div>
                        Player {player_num} ({current_model_name}) is thinking...
                    </div>
                </div>""",
                unsafe_allow_html=True,
            )
            valid_moves = st.session_state.game_board.get_valid_moves()
            current_agent = (
                st.session_state.player_x
                if current_player == "X"
                else st.session_state.player_o
            )
            response: RunOutput = current_agent.run(
                f"""\
Current board state:\n{st.session_state.game_board.get_board_state()}\n
Available valid moves (row, col): {valid_moves}\n
Choose your next move from the valid moves above.
Respond with ONLY two numbers for row and column, e.g. "1 2".""",
                stream=False,
            )
            try:
                import re
                # Take the first two integers anywhere in the reply as (row, col).
                numbers = re.findall(r"\d+", response.content if response else "")
                row, col = map(int, numbers[:2])
                success, message = st.session_state.game_board.make_move(row, col)
                if success:
                    move_number = len(st.session_state.move_history) + 1
                    st.session_state.move_history.append(
                        {
                            "number": move_number,
                            "player": f"Player {player_num} ({current_model_name})",
                            "move": f"{row},{col}",
                        }
                    )
                    logger.info(
                        f"Move {move_number}: Player {player_num} ({current_model_name}) placed at position ({row}, {col})"
                    )
                    logger.info(
                        f"Board state:\n{st.session_state.game_board.get_board_state()}"
                    )
                    # Check game state after move
                    game_over, status = st.session_state.game_board.get_game_state()
                    if game_over:
                        logger.info(f"Game Over - {status}")
                        if "wins" in status:
                            st.success(f"🏆 Game Over! {status}")
                        else:
                            st.info(f"🤝 Game Over! {status}")
                        st.session_state.game_paused = True
                    st.rerun()
                else:
                    logger.error(f"Invalid move attempt: {message}")
                    # NOTE(review): this retry response is never parsed or applied;
                    # st.rerun() below restarts the pass and prompts the agent again,
                    # so this extra model call looks redundant — confirm before removing.
                    response: RunOutput = current_agent.run(
                        f"""\
Invalid move: {message}
Current board state:\n{st.session_state.game_board.get_board_state()}\n
Available valid moves (row, col): {valid_moves}\n
Please choose a valid move from the list above.
Respond with ONLY two numbers for row and column, e.g. "1 2".""",
                        stream=False,
                    )
                    st.rerun()
            except Exception as e:
                logger.error(f"Error processing move: {str(e)}")
                st.error(f"Error processing move: {str(e)}")
                st.rerun()
    else:
        st.info("👈 Press 'Start Game' to begin!")
    ####################################################################
    # About section
    ####################################################################
    st.sidebar.markdown(f"""
    ### 🎮 Agent Tic Tac Toe Battle
    Watch two agents compete in real-time!
    **Current Players:**
    * 🔵 Player X: `{selected_p_x}`
    * 🔴 Player O: `{selected_p_o}`
    **How it Works:**
    Each Agent analyzes the board and employs strategic thinking to:
    * 🏆 Find winning moves
    * 🛡️ Block opponent victories
    * ⭐ Control strategic positions
    * 🤔 Plan multiple moves ahead
    Built with Streamlit and Agno
    """)
if __name__ == "__main__":
    main()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/autonomous_game_playing_agent_apps/ai_chess_agent/ai_chess_agent.py | advanced_ai_agents/autonomous_game_playing_agent_apps/ai_chess_agent/ai_chess_agent.py | import chess
import chess.svg
import streamlit as st
from autogen import ConversableAgent, register_function
# Persist game state across Streamlit reruns: each key is created once and
# then survives every subsequent script re-execution.
if "openai_api_key" not in st.session_state:
    st.session_state.openai_api_key = None
if "board" not in st.session_state:
    st.session_state.board = chess.Board()
if "made_move" not in st.session_state:
    st.session_state.made_move = False
if "board_svg" not in st.session_state:
    st.session_state.board_svg = None
if "move_history" not in st.session_state:
    st.session_state.move_history = []
if "max_turns" not in st.session_state:
    st.session_state.max_turns = 5
# Sidebar: API key entry and turn-limit configuration.
st.sidebar.title("Chess Agent Configuration")
openai_api_key = st.sidebar.text_input("Enter your OpenAI API key:", type="password")
if openai_api_key:
    st.session_state.openai_api_key = openai_api_key
    st.sidebar.success("API key saved!")
st.sidebar.info("""
For a complete chess game with potential checkmate, it would take max_turns > 200 approximately.
However, this will consume significant API credits and a lot of time.
For demo purposes, using 5-10 turns is recommended.
""")
max_turns_input = st.sidebar.number_input(
    "Enter the number of turns (max_turns):",
    min_value=1,
    max_value=1000,
    value=st.session_state.max_turns,
    step=1
)
if max_turns_input:
    st.session_state.max_turns = max_turns_input
    st.sidebar.success(f"Max turns of total chess moves set to {st.session_state.max_turns}!")
st.title("Chess with AutoGen Agents")
def available_moves() -> str:
    """Return a comma-separated listing of all currently legal moves (UCI notation)."""
    legal = (str(move) for move in st.session_state.board.legal_moves)
    return "Available moves are: " + ",".join(legal)
def execute_move(move: str) -> str:
    """Apply a UCI move to the shared board and describe the result.

    Registered as an AutoGen tool for both player agents. Side effects:
    pushes the move onto st.session_state.board, sets
    st.session_state.made_move (the Game Master's termination signal),
    and appends a board SVG to the move history.

    Args:
        move: Candidate move in UCI notation, e.g. "e2e4".

    Returns:
        A human-readable description of the move and any resulting game
        state (check, checkmate, stalemate, insufficient material), or an
        error string if the move is illegal or malformed.
    """
    try:
        chess_move = chess.Move.from_uci(move)
        if chess_move not in st.session_state.board.legal_moves:
            return f"Invalid move: {move}. Please call available_moves() to see valid moves."
        # Update board state
        st.session_state.board.push(chess_move)
        st.session_state.made_move = True
        # Generate and store board visualization (arrow marks the move just played)
        board_svg = chess.svg.board(st.session_state.board,
                                    arrows=[(chess_move.from_square, chess_move.to_square)],
                                    fill={chess_move.from_square: "gray"},
                                    size=400)
        st.session_state.board_svg = board_svg
        st.session_state.move_history.append(board_svg)
        # Get piece information — queried after push, so this is the piece
        # now sitting on its destination square.
        moved_piece = st.session_state.board.piece_at(chess_move.to_square)
        piece_unicode = moved_piece.unicode_symbol()
        piece_type_name = chess.piece_name(moved_piece.piece_type)
        # NOTE(review): unicode chess symbols (e.g. ♙/♟) are not cased letters,
        # so isupper() here may never be True — confirm the intended behavior.
        piece_name = piece_type_name.capitalize() if piece_unicode.isupper() else piece_type_name
        # Generate move description
        from_square = chess.SQUARE_NAMES[chess_move.from_square]
        to_square = chess.SQUARE_NAMES[chess_move.to_square]
        move_desc = f"Moved {piece_name} ({piece_unicode}) from {from_square} to {to_square}."
        if st.session_state.board.is_checkmate():
            # After push, board.turn is the side to move (the mated side),
            # so the winner is the opposite color.
            winner = 'White' if st.session_state.board.turn == chess.BLACK else 'Black'
            move_desc += f"\nCheckmate! {winner} wins!"
        elif st.session_state.board.is_stalemate():
            move_desc += "\nGame ended in stalemate!"
        elif st.session_state.board.is_insufficient_material():
            move_desc += "\nGame ended - insufficient material to checkmate!"
        elif st.session_state.board.is_check():
            move_desc += "\nCheck!"
        return move_desc
    except ValueError:
        # Raised by Move.from_uci for strings that are not valid UCI.
        return f"Invalid move format: {move}. Please use UCI format (e.g., 'e2e4')."
def check_made_move(msg):
    """Termination check for the Game Master: True once a move has been made.

    Consumes the made_move flag (resets it to False) so the next turn starts
    fresh. The message argument is required by the is_termination_msg
    interface but is not inspected.
    """
    if not st.session_state.made_move:
        return False
    st.session_state.made_move = False
    return True
if st.session_state.openai_api_key:
try:
agent_white_config_list = [
{
"model": "gpt-4o-mini",
"api_key": st.session_state.openai_api_key,
},
]
agent_black_config_list = [
{
"model": "gpt-4o-mini",
"api_key": st.session_state.openai_api_key,
},
]
agent_white = ConversableAgent(
name="Agent_White",
system_message="You are a professional chess player and you play as white. "
"First call available_moves() first, to get list of legal available moves. "
"Then call execute_move(move) to make a move.",
llm_config={"config_list": agent_white_config_list, "cache_seed": None},
)
agent_black = ConversableAgent(
name="Agent_Black",
system_message="You are a professional chess player and you play as black. "
"First call available_moves() first, to get list of legal available moves. "
"Then call execute_move(move) to make a move.",
llm_config={"config_list": agent_black_config_list, "cache_seed": None},
)
game_master = ConversableAgent(
name="Game_Master",
llm_config=False,
is_termination_msg=check_made_move,
default_auto_reply="Please make a move.",
human_input_mode="NEVER",
)
register_function(
execute_move,
caller=agent_white,
executor=game_master,
name="execute_move",
description="Call this tool to make a move.",
)
register_function(
available_moves,
caller=agent_white,
executor=game_master,
name="available_moves",
description="Get legal moves.",
)
register_function(
execute_move,
caller=agent_black,
executor=game_master,
name="execute_move",
description="Call this tool to make a move.",
)
register_function(
available_moves,
caller=agent_black,
executor=game_master,
name="available_moves",
description="Get legal moves.",
)
agent_white.register_nested_chats(
trigger=agent_black,
chat_queue=[
{
"sender": game_master,
"recipient": agent_white,
"summary_method": "last_msg",
}
],
)
agent_black.register_nested_chats(
trigger=agent_white,
chat_queue=[
{
"sender": game_master,
"recipient": agent_black,
"summary_method": "last_msg",
}
],
)
st.info("""
This chess game is played between two AG2 AI agents:
- **Agent White**: A GPT-4o-mini powered chess player controlling white pieces
- **Agent Black**: A GPT-4o-mini powered chess player controlling black pieces
The game is managed by a **Game Master** that:
- Validates all moves
- Updates the chess board
- Manages turn-taking between players
- Provides legal move information
""")
initial_board_svg = chess.svg.board(st.session_state.board, size=300)
st.subheader("Initial Board")
st.image(initial_board_svg)
if st.button("Start Game"):
st.session_state.board.reset()
st.session_state.made_move = False
st.session_state.move_history = []
st.session_state.board_svg = chess.svg.board(st.session_state.board, size=300)
st.info("The AI agents will now play against each other. Each agent will analyze the board, "
"request legal moves from the Game Master (proxy agent), and make strategic decisions.")
st.success("You can view the interaction between the agents in the terminal output, after the turns between agents end, you get view all the chess board moves displayed below!")
st.write("Game started! White's turn.")
chat_result = agent_black.initiate_chat(
recipient=agent_white,
message="Let's play chess! You go first, its your move.",
max_turns=st.session_state.max_turns,
summary_method="reflection_with_llm"
)
st.markdown(chat_result.summary)
# Display the move history (boards for each move)
st.subheader("Move History")
for i, move_svg in enumerate(st.session_state.move_history):
# Determine which agent made the move
if i % 2 == 0:
move_by = "Agent White" # Even-indexed moves are by White
else:
move_by = "Agent Black" # Odd-indexed moves are by Black
st.write(f"Move {i + 1} by {move_by}")
st.image(move_svg)
if st.button("Reset Game"):
st.session_state.board.reset()
st.session_state.made_move = False
st.session_state.move_history = []
st.session_state.board_svg = None
st.write("Game reset! Click 'Start Game' to begin a new game.")
except Exception as e:
st.error(f"An error occurred: {e}. Please check your API key and try again.")
else:
st.warning("Please enter your OpenAI API key in the sidebar to start the game.") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/autonomous_game_playing_agent_apps/ai_3dpygame_r1/ai_3dpygame_r1.py | advanced_ai_agents/autonomous_game_playing_agent_apps/ai_3dpygame_r1/ai_3dpygame_r1.py | import streamlit as st
from openai import OpenAI
from agno.agent import Agent as AgnoAgent
from agno.run.agent import RunOutput
from agno.models.openai import OpenAIChat as AgnoOpenAIChat
from langchain_openai import ChatOpenAI
import asyncio
from browser_use import Browser
st.set_page_config(page_title="PyGame Code Generator", layout="wide")
# Initialize session state
if "api_keys" not in st.session_state:
st.session_state.api_keys = {
"deepseek": "",
"openai": ""
}
# Streamlit sidebar for API keys
with st.sidebar:
st.title("API Keys Configuration")
st.session_state.api_keys["deepseek"] = st.text_input(
"DeepSeek API Key",
type="password",
value=st.session_state.api_keys["deepseek"]
)
st.session_state.api_keys["openai"] = st.text_input(
"OpenAI API Key",
type="password",
value=st.session_state.api_keys["openai"]
)
st.markdown("---")
st.info("""
📝 How to use:
1. Enter your API keys above
2. Write your PyGame visualization query
3. Click 'Generate Code' to get the code
4. Click 'Generate Visualization' to:
- Open Trinket.io PyGame editor
- Copy and paste the generated code
- Watch it run automatically
""")
# Main UI
st.title("🎮 AI 3D Visualizer with DeepSeek R1")
example_query = "Create a particle system simulation where 100 particles emit from the mouse position and respond to keyboard-controlled wind forces"
query = st.text_area(
"Enter your PyGame query:",
height=70,
placeholder=f"e.g.: {example_query}"
)
# Split the buttons into columns
col1, col2 = st.columns(2)
generate_code_btn = col1.button("Generate Code")
generate_vis_btn = col2.button("Generate Visualization")
if generate_code_btn and query:
if not st.session_state.api_keys["deepseek"] or not st.session_state.api_keys["openai"]:
st.error("Please provide both API keys in the sidebar")
st.stop()
# Initialize Deepseek client
deepseek_client = OpenAI(
api_key=st.session_state.api_keys["deepseek"],
base_url="https://api.deepseek.com"
)
system_prompt = """You are a Pygame and Python Expert that specializes in making games and visualisation through pygame and python programming.
During your reasoning and thinking, include clear, concise, and well-formatted Python code in your reasoning.
Always include explanations for the code you provide."""
try:
# Get reasoning from Deepseek
with st.spinner("Generating solution..."):
deepseek_response = deepseek_client.chat.completions.create(
model="deepseek-reasoner",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": query}
],
max_tokens=1
)
reasoning_content = deepseek_response.choices[0].message.reasoning_content
print("\nDeepseek Reasoning:\n", reasoning_content)
with st.expander("R1's Reasoning"):
st.write(reasoning_content)
# Initialize OpenAI agent
openai_agent = AgnoAgent(
model=AgnoOpenAIChat(
id="gpt-4o",
api_key=st.session_state.api_keys["openai"]
),
debug_mode=True,
markdown=True
)
# Extract code
extraction_prompt = f"""Extract ONLY the Python code from the following content which is reasoning of a particular query to make a pygame script.
Return nothing but the raw code without any explanations, or markdown backticks:
{reasoning_content}"""
with st.spinner("Extracting code..."):
code_response: RunOutput = openai_agent.run(extraction_prompt)
extracted_code = code_response.content
# Store the generated code in session state
st.session_state.generated_code = extracted_code
# Display the code
with st.expander("Generated PyGame Code", expanded=True):
st.code(extracted_code, language="python")
st.success("Code generated successfully! Click 'Generate Visualization' to run it.")
except Exception as e:
st.error(f"An error occurred: {str(e)}")
elif generate_vis_btn:
if "generated_code" not in st.session_state:
st.warning("Please generate code first before visualization")
else:
        async def run_pygame_on_trinket(code: str) -> None:
            """Open Trinket's PyGame editor with browser-use agents and run the code.

            NOTE(review): the `code` parameter is never used in this body — the
            "Coder" agent only waits 10 seconds for a human to paste the code
            into the editor. Confirm whether automatic code entry was intended.
            """
            browser = Browser()
            # Local import: browser_use.Agent is only needed inside this coroutine.
            from browser_use import Agent
            async with await browser.new_context() as context:
                model = ChatOpenAI(
                    model="gpt-4o",
                    api_key=st.session_state.api_keys["openai"]
                )
                # Navigator: only opens the Trinket PyGame page.
                agent1 = Agent(
                    task='Go to https://trinket.io/features/pygame, thats your only job.',
                    llm=model,
                    browser_context=context,
                )
                executor = Agent(
                    task='Executor. Execute the code written by the User by clicking on the run button on the right. ',
                    llm=model,
                    browser_context=context
                )
                coder = Agent(
                    task='Coder. Your job is to wait for the user for 10 seconds to write the code in the code editor.',
                    llm=model,
                    browser_context=context
                )
                viewer = Agent(
                    task='Viewer. Your job is to just view the pygame window for 10 seconds.',
                    llm=model,
                    browser_context=context,
                )
                with st.spinner("Running code on Trinket..."):
                    try:
                        # Sequence: navigate -> wait for paste -> click run -> watch.
                        await agent1.run()
                        await coder.run()
                        await executor.run()
                        await viewer.run()
                        st.success("Code is running on Trinket!")
                    except Exception as e:
                        st.error(f"Error running code on Trinket: {str(e)}")
                        st.info("You can still copy the code above and run it manually on Trinket")
        # Run the async function with the stored code
        asyncio.run(run_pygame_on_trinket(st.session_state.generated_code))
elif generate_code_btn and not query:
st.warning("Please enter a query before generating code") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_investment_agent/investment_agent.py | advanced_ai_agents/single_agent_apps/ai_investment_agent/investment_agent.py | # import necessary python libraries
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.yfinance import YFinanceTools
from agno.os import AgentOS
# create the AI investment agent
agent = Agent(
    name="AI Investment Agent",
    model=OpenAIChat(id="gpt-5.2-2025-12-11"),
    tools=[YFinanceTools()],
    description="You are an investment analyst that researches stock prices, analyst recommendations, and stock fundamentals.",
    instructions=[
        "Format your response using markdown and use tables to display data where possible.",
        "When comparing stocks, provide detailed analysis including price trends, fundamentals, and analyst recommendations.",
        "Always provide actionable insights for investors."
    ],
    debug_mode=True,
    markdown=True,
)
# UI for investment agent using AgentOS
agent_os = AgentOS(agents=[agent])
app = agent_os.get_app()
if __name__ == "__main__":
    # serve() references this module by name ("investment_agent:app"), so the
    # file must keep the name investment_agent.py for reload to work.
    agent_os.serve(app="investment_agent:app", reload=True)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_system_architect_r1/ai_system_architect_r1.py | advanced_ai_agents/single_agent_apps/ai_system_architect_r1/ai_system_architect_r1.py | from typing import Optional, List, Dict, Any, Union
import os
import time
import streamlit as st
from openai import OpenAI
import anthropic
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from enum import Enum
import json
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.models.anthropic import Claude
# Model Constants
DEEPSEEK_MODEL: str = "deepseek-reasoner"  # DeepSeek model used for reasoning/analysis
# NOTE(review): ModelChain hardcodes this same ID again when constructing its
# Claude model — consider referencing this constant there instead.
CLAUDE_MODEL: str = "claude-3-5-sonnet-20241022"
class ArchitecturePattern(str, Enum):
    """Architectural patterns for system design.

    NOTE(review): the DeepSeek prompt schema below also lists "layered" as a
    valid pattern — confirm whether it should be a member here.
    """
    MICROSERVICES = "microservices"  # Decomposed into small, independent services
    MONOLITHIC = "monolithic"  # Single, unified codebase
    SERVERLESS = "serverless"  # Function-as-a-Service architecture
    EVENT_DRIVEN = "event_driven"  # Asynchronous event-based communication
class DatabaseType(str, Enum):
    """Types of database systems.

    NOTE(review): the DeepSeek prompt schema below also lists "graph" and
    "time_series" — confirm whether they should be members here.
    """
    SQL = "sql"  # Relational databases with ACID properties
    NOSQL = "nosql"  # Non-relational databases for flexible schemas
    HYBRID = "hybrid"  # Combined SQL and NoSQL approach
class ComplianceStandard(str, Enum):
    """Regulatory compliance standards.

    NOTE(review): the DeepSeek prompt schema below also lists "hitech" and
    "pci_dss" — confirm whether they should be members here.
    """
    HIPAA = "hipaa"  # Healthcare data protection
    GDPR = "gdpr"  # EU data privacy regulation
    SOC2 = "soc2"  # Service organization security controls
    ISO27001 = "iso27001"  # Information security management
class ArchitectureDecision(BaseModel):
    """Represents architectural decisions and their justifications."""
    pattern: ArchitecturePattern  # Chosen high-level architecture style
    rationale: str = Field(..., min_length=50)  # Detailed explanation for the choice
    # NOTE(review): the alias equals the field name, so Field(alias=...) here is
    # a no-op — confirm whether a different wire name was intended.
    trade_offs: Dict[str, List[str]] = Field(..., alias="trade_offs")  # Pros and cons
    estimated_cost: Dict[str, float]  # Cost breakdown
class SecurityMeasure(BaseModel):
    """Security controls and implementation details.

    NOTE(review): the DeepSeek prompt schema below emits additional keys
    (encryption_requirements, access_control_policy, audit_requirements,
    estimated_setup_time_days) that are not declared here — confirm how
    extra keys should be handled during validation.
    """
    measure_type: str  # Type of security measure
    implementation_priority: int = Field(..., ge=1, le=5)  # Priority level 1-5
    compliance_standards: List[ComplianceStandard]  # Applicable standards
    data_classification: str  # Data sensitivity level
class InfrastructureResource(BaseModel):
    """Infrastructure components and specifications.

    Populated from the "infrastructure_resources" entries of the DeepSeek
    JSON response.
    """
    resource_type: str  # Type of infrastructure resource
    specifications: Dict[str, str]  # Technical specifications
    scaling_policy: Dict[str, str]  # Scaling rules and thresholds
    estimated_cost: float  # Estimated cost per resource
class TechnicalAnalysis(BaseModel):
    """Complete technical analysis of the system architecture.

    Top-level model for the DeepSeek analysis payload. The trailing fields
    default to empty containers when absent from the response; pydantic
    manages field defaults per-instance, so the mutable defaults below are
    not shared across instances the way plain-class defaults would be.
    """
    architecture_decision: ArchitectureDecision  # Core architecture choices
    infrastructure_resources: List[InfrastructureResource]  # Required resources
    security_measures: List[SecurityMeasure]  # Security controls
    database_choice: DatabaseType  # Database architecture
    compliance_requirements: List[ComplianceStandard] = []  # Required standards
    performance_requirements: List[Dict[str, Union[str, float]]] = []  # Performance metrics
    risk_assessment: Dict[str, str] = {}  # Identified risks and mitigations
class ModelChain:
def __init__(self, deepseek_api_key: str, anthropic_api_key: str) -> None:
self.client = OpenAI(
api_key=deepseek_api_key,
base_url="https://api.deepseek.com"
)
self.claude_client = anthropic.Anthropic(api_key=anthropic_api_key)
# Create Claude model with system prompt
claude_model = Claude(
id="claude-3-5-sonnet-20241022",
api_key=anthropic_api_key,
system_prompt="""Given the user's query and the DeepSeek reasoning:
1. Provide a detailed analysis of the architecture decisions
2. Generate a project implementation roadmap
3. Create a comprehensive technical specification document
4. Format the output in clean markdown with proper sections
5. Include diagrams descriptions in mermaid.js format"""
)
# Initialize agent with configured model
self.agent = Agent(
model=claude_model,
markdown=True
)
self.deepseek_messages: List[Dict[str, str]] = []
self.claude_messages: List[Dict[str, Any]] = []
self.current_model: str = CLAUDE_MODEL
def get_deepseek_reasoning(self, user_input: str) -> tuple[str, str]:
start_time = time.time()
system_prompt = """You are an expert software architect and technical advisor. Analyze the user's project requirements
and provide structured reasoning about architecture, tools, and implementation strategies.
IMPORTANT: Reason why you are choosing a particular architecture pattern, database type, etc. for user understanding in your reasoning.
IMPORTANT: Your response must be a valid JSON object (not a string or any other format) that matches the schema provided below.
Do not include any explanatory text, markdown formatting, or code blocks - only return the JSON object.
Schema:
{
"architecture_decision": {
"pattern": "one of: microservices|monolithic|serverless|event_driven|layered",
"rationale": "string",
"trade_offs": {"advantage": ["list of strings"], "disadvantage": ["list of strings"]},
"estimated_cost": {"implementation": float, "maintenance": float}
},
"infrastructure_resources": [{
"resource_type": "string",
"specifications": {"key": "value"},
"scaling_policy": {"key": "value"},
"estimated_cost": float
}],
"security_measures": [{
"measure_type": "string",
"implementation_priority": "integer 1-5",
"compliance_standards": ["hipaa", "gdpr", "soc2", "hitech", "iso27001", "pci_dss"],
"estimated_setup_time_days": "integer",
"data_classification": "one of: protected_health_information|personally_identifiable_information|confidential|public",
"encryption_requirements": {"key": "value"},
"access_control_policy": {"role": ["permissions"]},
"audit_requirements": ["list of strings"]
}],
"database_choice": "one of: sql|nosql|graph|time_series|hybrid",
"ml_capabilities": [{
"model_type": "string",
"training_frequency": "string",
"input_data_types": ["list of strings"],
"performance_requirements": {"metric": float},
"hardware_requirements": {"resource": "specification"},
"regulatory_constraints": ["list of strings"]
}],
"data_integrations": [{
"integration_type": "one of: hl7|fhir|dicom|rest|soap|custom",
"data_format": "string",
"frequency": "string",
"volume": "string",
"security_requirements": {"key": "value"}
}],
"performance_requirements": [{
"metric_name": "string",
"target_value": float,
"measurement_unit": "string",
"priority": "integer 1-5"
}],
"audit_config": {
"log_retention_period": "integer",
"audit_events": ["list of strings"],
"compliance_mapping": {"standard": ["requirements"]}
},
"api_config": {
"version": "string",
"auth_method": "string",
"rate_limits": {"role": "requests_per_minute"},
"documentation_url": "string"
},
"error_handling": {
"retry_policy": {"key": "value"},
"fallback_strategies": ["list of strings"],
"notification_channels": ["list of strings"]
},
"estimated_team_size": "integer",
"critical_path_components": ["list of strings"],
"risk_assessment": {"risk": "mitigation"},
"maintenance_considerations": ["list of strings"],
"compliance_requirements": ["list of compliance standards"],
"data_retention_policy": {"data_type": "retention_period"},
"disaster_recovery": {"key": "value"},
"interoperability_standards": ["list of strings"]
}
Consider scalability, security, maintenance, and technical debt in your analysis.
Focus on practical, modern solutions while being mindful of trade-offs."""
try:
deepseek_response = self.client.chat.completions.create(
model="deepseek-reasoner",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_input}
],
max_tokens=3000,
stream=False
)
reasoning_content = deepseek_response.choices[0].message.reasoning_content
normal_content = deepseek_response.choices[0].message.content
# Display the reasoning separately
with st.expander("DeepSeek Reasoning", expanded=True):
st.markdown(reasoning_content)
with st.expander("💭 Technical Analysis", expanded=True):
st.markdown(normal_content)
elapsed_time = time.time() - start_time
time_str = f"{elapsed_time/60:.1f} minutes" if elapsed_time >= 60 else f"{elapsed_time:.1f} seconds"
st.caption(f"⏱️ Analysis completed in {time_str}")
# Return both reasoning and normal content
return reasoning_content, normal_content
except Exception as e:
st.error(f"Error in DeepSeek analysis: {str(e)}")
return "Error occurred while analyzing", ""
def get_claude_response(self, user_input: str, deepseek_output: tuple[str, str]) -> str:
try:
reasoning_content, normal_content = deepseek_output
# Create expander for Claude's response
with st.expander("🤖 Claude's Response", expanded=True):
response_placeholder = st.empty()
# Prepare the message with user input, reasoning and normal output
message = f"""User Query: {user_input}
DeepSeek Reasoning: {reasoning_content}
DeepSeek Technical Analysis: {normal_content}
Give detailed explanation for each key value pair in brief in the JSON object, and why we chose it clearly. Dont use your own opinions, use the reasoning and the structured output to explain the choices."""
# Use Claude Agent to get response
response: RunOutput = self.agent.run(
message=message
)
dub = response.content
st.markdown(dub)
return dub
except Exception as e:
st.error(f"Error in Claude response: {str(e)}")
return "Error occurred while getting response"
def main() -> None:
"""Main function to run the Streamlit app."""
st.title("🤖 AI System Architect Advisor with R1")
# Add prompt guidance
st.info("""
📝 For best results, structure your prompt with:
1. **Project Context**: Brief description of your project/system
2. **Requirements**: Key functional and non-functional requirements
3. **Constraints**: Any technical, budget, or time constraints
4. **Scale**: Expected user base and growth projections
5. **Security/Compliance**: Any specific security or regulatory needs
Example:
```
I need to build a healthcare data management system that:
- Handles patient records and appointments
- Needs to scale to 10,000 users
- Must be HIPAA compliant
- Budget constraint of $50k for initial setup
- Should integrate with existing hospital systems
```
""")
# Sidebar for API keys
with st.sidebar:
st.header("⚙️ Configuration")
deepseek_api_key = st.text_input("DeepSeek API Key", type="password")
anthropic_api_key = st.text_input("Anthropic API Key", type="password")
if st.button("🗑️ Clear Chat History"):
st.session_state.messages = []
st.rerun()
# Initialize session state for messages
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("What would you like to know?"):
if not deepseek_api_key or not anthropic_api_key:
st.error("⚠️ Please enter both API keys in the sidebar.")
return
# Initialize ModelChain
chain = ModelChain(deepseek_api_key, anthropic_api_key)
# Add user message to chat
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
# Get AI response
with st.chat_message("assistant"):
with st.spinner("🤔 Thinking..."):
deepseek_output = chain.get_deepseek_reasoning(prompt)
with st.spinner("✍️ Responding..."):
response = chain.get_claude_response(prompt, deepseek_output)
st.session_state.messages.append({"role": "assistant", "content": response})
if __name__ == "__main__":
main() | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_meeting_agent/meeting_agent.py | advanced_ai_agents/single_agent_apps/ai_meeting_agent/meeting_agent.py | import streamlit as st
from crewai import Agent, Task, Crew, LLM
from crewai.process import Process
from crewai_tools import SerperDevTool
import os
# Streamlit app setup
st.set_page_config(page_title="AI Meeting Agent 📝", layout="wide")
st.title("AI Meeting Preparation Agent 📝")
# Sidebar for API keys
st.sidebar.header("API Keys")
anthropic_api_key = st.sidebar.text_input("Anthropic API Key", type="password")
serper_api_key = st.sidebar.text_input("Serper API Key", type="password")
# Check if all API keys are set
if anthropic_api_key and serper_api_key:
# # Set API keys as environment variables
os.environ["ANTHROPIC_API_KEY"] = anthropic_api_key
os.environ["SERPER_API_KEY"] = serper_api_key
claude = LLM(model="claude-3-5-sonnet-20240620", temperature= 0.7, api_key=anthropic_api_key)
search_tool = SerperDevTool()
# Input fields
company_name = st.text_input("Enter the company name:")
meeting_objective = st.text_input("Enter the meeting objective:")
attendees = st.text_area("Enter the attendees and their roles (one per line):")
meeting_duration = st.number_input("Enter the meeting duration (in minutes):", min_value=15, max_value=180, value=60, step=15)
focus_areas = st.text_input("Enter any specific areas of focus or concerns:")
# Define the agents
context_analyzer = Agent(
role='Meeting Context Specialist',
goal='Analyze and summarize key background information for the meeting',
backstory='You are an expert at quickly understanding complex business contexts and identifying critical information.',
verbose=True,
allow_delegation=False,
llm=claude,
tools=[search_tool]
)
industry_insights_generator = Agent(
role='Industry Expert',
goal='Provide in-depth industry analysis and identify key trends',
backstory='You are a seasoned industry analyst with a knack for spotting emerging trends and opportunities.',
verbose=True,
allow_delegation=False,
llm=claude,
tools=[search_tool]
)
strategy_formulator = Agent(
role='Meeting Strategist',
goal='Develop a tailored meeting strategy and detailed agenda',
backstory='You are a master meeting planner, known for creating highly effective strategies and agendas.',
verbose=True,
allow_delegation=False,
llm=claude,
)
executive_briefing_creator = Agent(
role='Communication Specialist',
goal='Synthesize information into concise and impactful briefings',
backstory='You are an expert communicator, skilled at distilling complex information into clear, actionable insights.',
verbose=True,
allow_delegation=False,
llm=claude,
)
# Define the tasks
context_analysis_task = Task(
description=f"""
Analyze the context for the meeting with {company_name}, considering:
1. The meeting objective: {meeting_objective}
2. The attendees: {attendees}
3. The meeting duration: {meeting_duration} minutes
4. Specific focus areas or concerns: {focus_areas}
Research {company_name} thoroughly, including:
1. Recent news and press releases
2. Key products or services
3. Major competitors
Provide a comprehensive summary of your findings, highlighting the most relevant information for the meeting context.
Format your output using markdown with appropriate headings and subheadings.
""",
agent=context_analyzer,
expected_output="A detailed analysis of the meeting context and company background, including recent developments, financial performance, and relevance to the meeting objective, formatted in markdown with headings and subheadings."
)
industry_analysis_task = Task(
description=f"""
Based on the context analysis for {company_name} and the meeting objective: {meeting_objective}, provide an in-depth industry analysis:
1. Identify key trends and developments in the industry
2. Analyze the competitive landscape
3. Highlight potential opportunities and threats
4. Provide insights on market positioning
Ensure the analysis is relevant to the meeting objective and attendees' roles.
Format your output using markdown with appropriate headings and subheadings.
""",
agent=industry_insights_generator,
expected_output="A comprehensive industry analysis report, including trends, competitive landscape, opportunities, threats, and relevant insights for the meeting objective, formatted in markdown with headings and subheadings."
)
strategy_development_task = Task(
description=f"""
Using the context analysis and industry insights, develop a tailored meeting strategy and detailed agenda for the {meeting_duration}-minute meeting with {company_name}. Include:
1. A time-boxed agenda with clear objectives for each section
2. Key talking points for each agenda item
3. Suggested speakers or leaders for each section
4. Potential discussion topics and questions to drive the conversation
5. Strategies to address the specific focus areas and concerns: {focus_areas}
Ensure the strategy and agenda align with the meeting objective: {meeting_objective}
Format your output using markdown with appropriate headings and subheadings.
""",
agent=strategy_formulator,
expected_output="A detailed meeting strategy and time-boxed agenda, including objectives, key talking points, and strategies to address specific focus areas, formatted in markdown with headings and subheadings."
)
executive_brief_task = Task(
description=f"""
Synthesize all the gathered information into a comprehensive yet concise executive brief for the meeting with {company_name}. Create the following components:
1. A detailed one-page executive summary including:
- Clear statement of the meeting objective
- List of key attendees and their roles
- Critical background points about {company_name} and relevant industry context
- Top 3-5 strategic goals for the meeting, aligned with the objective
- Brief overview of the meeting structure and key topics to be covered
2. An in-depth list of key talking points, each supported by:
- Relevant data or statistics
- Specific examples or case studies
- Connection to the company's current situation or challenges
3. Anticipate and prepare for potential questions:
- List likely questions from attendees based on their roles and the meeting objective
- Craft thoughtful, data-driven responses to each question
- Include any supporting information or additional context that might be needed
4. Strategic recommendations and next steps:
- Provide 3-5 actionable recommendations based on the analysis
- Outline clear next steps for implementation or follow-up
- Suggest timelines or deadlines for key actions
- Identify potential challenges or roadblocks and propose mitigation strategies
Ensure the brief is comprehensive yet concise, highly actionable, and precisely aligned with the meeting objective: {meeting_objective}. The document should be structured for easy navigation and quick reference during the meeting.
Format your output using markdown with appropriate headings and subheadings.
""",
agent=executive_briefing_creator,
expected_output="A comprehensive executive brief including summary, key talking points, Q&A preparation, and strategic recommendations, formatted in markdown with main headings (H1), section headings (H2), and subsection headings (H3) where appropriate. Use bullet points, numbered lists, and emphasis (bold/italic) for key information."
)
# Create the crew
meeting_prep_crew = Crew(
agents=[context_analyzer, industry_insights_generator, strategy_formulator, executive_briefing_creator],
tasks=[context_analysis_task, industry_analysis_task, strategy_development_task, executive_brief_task],
verbose=True,
process=Process.sequential
)
# Run the crew when the user clicks the button
if st.button("Prepare Meeting"):
with st.spinner("AI agents are preparing your meeting..."):
result = meeting_prep_crew.kickoff()
st.markdown(result)
st.sidebar.markdown("""
## How to use this app:
1. Enter your API keys in the sidebar.
2. Provide the requested information about the meeting.
3. Click 'Prepare Meeting' to generate your comprehensive meeting preparation package.
The AI agents will work together to:
- Analyze the meeting context and company background
- Provide industry insights and trends
- Develop a tailored meeting strategy and agenda
- Create an executive brief with key talking points
This process may take a few minutes. Please be patient!
""")
else:
st.warning("Please enter all API keys in the sidebar before proceeding.") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_startup_insight_fire1_agent/ai_startup_insight_fire1_agent.py | advanced_ai_agents/single_agent_apps/ai_startup_insight_fire1_agent/ai_startup_insight_fire1_agent.py | from firecrawl import FirecrawlApp
import streamlit as st
import os
import json
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.models.openai import OpenAIChat
st.set_page_config(
page_title="Startup Info Extraction",
page_icon="🔍",
layout="wide"
)
st.title("AI Startup Insight with Firecrawl's FIRE-1 Agent")
# Sidebar for API key
with st.sidebar:
st.header("API Configuration")
firecrawl_api_key = st.text_input("Firecrawl API Key", type="password")
openai_api_key = st.text_input("OpenAI API Key", type="password")
st.caption("Your API keys are securely stored and not shared.")
st.markdown("---")
st.markdown("### About")
st.markdown("This tool extracts company information from websites using Firecrawl's FIRE-1 agent and provides AI-powered business analysis.")
st.markdown("### How It Works")
st.markdown("1. 🔍 **FIRE - 1 Agent** extracts structured data from websites")
st.markdown("2. 🧠 **Agno Agent** analyzes the data for business insights")
st.markdown("3. 📊 **Results** are presented in an organized format")
# Main content
# Add information about Firecrawl's capabilities
st.markdown("## 🔥 Firecrawl FIRE 1 Agent Capabilities")
col1, col2 = st.columns(2)
with col1:
st.info("**Advanced Web Extraction**\n\nFirecrawl's FIRE 1 agent combined with the extract endpoint can intelligently navigate websites to extract structured data, even from complex layouts and dynamic content.")
st.success("**Interactive Navigation**\n\nThe agent can interact with buttons, links, input fields, and other dynamic elements to access hidden information.")
with col2:
st.warning("**Multi-page Processing**\n\nFIRE can handle pagination and multi-step processes, allowing it to gather comprehensive data across entire websites.")
st.error("**Intelligent Data Structuring**\n\nThe agent automatically structures extracted information according to your specified schema, making it immediately usable.")
st.markdown("---")
st.markdown("### 🌐 Enter Website URLs")
st.markdown("Provide one or more company website URLs (one per line) to extract information.")
website_urls = st.text_area("Website URLs (one per line)", placeholder="https://example.com\nhttps://another-company.com")
# Define a JSON schema directly without Pydantic
extraction_schema = {
"type": "object",
"properties": {
"company_name": {
"type": "string",
"description": "The official name of the company or startup"
},
"company_description": {
"type": "string",
"description": "A description of what the company does and its value proposition"
},
"company_mission": {
"type": "string",
"description": "The company's mission statement or purpose"
},
"product_features": {
"type": "array",
"items": {
"type": "string"
},
"description": "Key features or capabilities of the company's products/services"
},
"contact_phone": {
"type": "string",
"description": "Company's contact phone number if available"
}
},
"required": ["company_name", "company_description", "product_features"]
}
# Custom CSS for better UI
st.markdown("""
<style>
.stButton button {
background-color: #FF4B4B;
color: white;
font-weight: bold;
border-radius: 10px;
padding: 0.5rem 1rem;
border: none;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
transition: all 0.3s ease;
}
.stButton button:hover {
background-color: #FF2B2B;
box-shadow: 0 6px 8px rgba(0, 0, 0, 0.15);
transform: translateY(-2px);
}
.css-1r6slb0 {
border-radius: 10px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
</style>
""", unsafe_allow_html=True)
# Start extraction when button is clicked
if st.button("🚀 Start Analysis", type="primary"):
if not website_urls.strip():
st.error("Please enter at least one website URL")
else:
try:
with st.spinner("Extracting information from website..."):
# Initialize the FirecrawlApp with the API key
app = FirecrawlApp(api_key=firecrawl_api_key)
# Parse the input URLs more robustly
# Split by newline, strip whitespace from each line, and filter out empty lines
urls = [url.strip() for url in website_urls.split('\n') if url.strip()]
# Debug: Show the parsed URLs
st.info(f"Attempting to process these URLs: {urls}")
if not urls:
st.error("No valid URLs found after parsing. Please check your input.")
elif not openai_api_key:
st.warning("Please provide an OpenAI API key in the sidebar to get AI analysis.")
else:
# Create tabs for each URL
tabs = st.tabs([f"Website {i+1}: {url}" for i, url in enumerate(urls)])
# Initialize the Agno agent once (outside the loop)
if openai_api_key:
agno_agent = Agent(
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
instructions="""You are an expert business analyst who provides concise, insightful summaries of companies.
You will be given structured data about a company including its name, description, mission, and product features.
Your task is to analyze this information and provide a brief, compelling summary that highlights:
1. What makes this company unique or innovative
2. The core value proposition for customers
3. The potential market impact or growth opportunities
Keep your response under 150 words, be specific, and focus on actionable insights.
""",
markdown=True
)
# Process each URL one at a time
for i, (url, tab) in enumerate(zip(urls, tabs)):
with tab:
st.markdown(f"### 🔍 Analyzing: {url}")
st.markdown("<hr style='border: 2px solid #FF4B4B; border-radius: 5px;'>", unsafe_allow_html=True)
with st.spinner(f"FIRE agent is extracting information from {url}..."):
try:
# Extract data for this single URL
data = app.extract(
[url], # Pass as a list with a single URL
params={
'prompt': '''
Analyze this company website thoroughly and extract comprehensive information.
1. Company Information:
- Identify the official company name
Explain: This is the legal name the company operates under.
- Extract a detailed yet concise description of what the company does
- Find the company's mission statement or purpose
Explain: What problem is the company trying to solve? How do they aim to make a difference?
2. Product/Service Information:
- Identify 3-5 specific product features or service offerings
Explain: What are the key things their product or service can do? Describe as if explaining to a non-expert.
- Focus on concrete capabilities rather than marketing claims
Explain: What does the product actually do, in simple terms, rather than how it's advertised?
- Be specific about what the product/service actually does
Explain: Give examples of how a customer might use this product or service in their daily life.
3. Contact Information:
- Find direct contact methods (phone numbers)
Explain: How can a potential customer reach out to speak with someone at the company?
- Only extract contact information that is explicitly provided
Explain: We're looking for official contact details, not inferring or guessing.
Important guidelines:
- Be thorough but concise in your descriptions
- Extract factual information, not marketing language
- If information is not available, do not make assumptions
- For each piece of information, provide a brief, simple explanation of what it means and why it's important
- Include a layman's explanation of what the company does, as if explaining to someone with no prior knowledge of the industry or technology involved
''',
'schema': extraction_schema,
'agent': {"model": "FIRE-1"}
}
)
# Check if extraction was successful
if data and data.get('data'):
# Display extracted data
st.subheader("📊 Extracted Information")
company_data = data.get('data')
# Display company name prominently
if 'company_name' in company_data:
st.markdown(f"{company_data['company_name']}")
# Display other extracted fields
for key, value in company_data.items():
if key == 'company_name':
continue # Already displayed above
display_key = key.replace('_', ' ').capitalize()
if value: # Only display if there's a value
if isinstance(value, list):
st.markdown(f"**{display_key}:**")
for item in value:
st.markdown(f"- {item}")
elif isinstance(value, str):
st.markdown(f"**{display_key}:** {value}")
elif isinstance(value, bool):
st.markdown(f"**{display_key}:** {str(value)}")
else:
st.write(f"**{display_key}:**", value)
# Process with Agno agent
if openai_api_key:
with st.spinner("Generating AI analysis..."):
# Run the agent with the extracted data
agent_response: RunOutput = agno_agent.run(f"Analyze this company data and provide insights: {json.dumps(company_data)}")
# Display the agent's analysis in a highlighted box
st.subheader("🧠 AI Business Analysis")
st.markdown(agent_response.content)
# Show raw data in expander
with st.expander("🔍 View Raw API Response"):
st.json(data)
# Add processing details
with st.expander("ℹ️ Processing Details"):
st.markdown("**FIRE Agent Actions:**")
st.markdown("- 🔍 Scanned website content and structure")
st.markdown("- 🖱️ Interacted with necessary page elements")
st.markdown("- 📊 Extracted and structured data according to schema")
st.markdown("- 🧠 Applied AI reasoning to identify relevant information")
if 'status' in data:
st.markdown(f"**Status:** {data['status']}")
if 'expiresAt' in data:
st.markdown(f"**Data Expires:** {data['expiresAt']}")
else:
st.error(f"No data was extracted from {url}. The website might be inaccessible, or the content structure may not match the expected format.")
except Exception as e:
st.error(f"Error processing {url}: {str(e)}")
except Exception as e:
st.error(f"Error during extraction: {str(e)}")
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/research_agent_gemini_interaction_api/research_planner_executor_agent.py | advanced_ai_agents/single_agent_apps/research_agent_gemini_interaction_api/research_planner_executor_agent.py | """Research Planner using Gemini Interactions API - demonstrates stateful conversations, model mixing, and background execution."""
import streamlit as st, time, re
from google import genai
def get_text(outputs): return "\n".join(o.text for o in (outputs or []) if hasattr(o, 'text') and o.text) or ""
def parse_tasks(text):
return [{"num": m.group(1), "text": m.group(2).strip().replace('\n', ' ')}
for m in re.finditer(r'^(\d+)[\.\)\-]\s*(.+?)(?=\n\d+[\.\)\-]|\n\n|\Z)', text, re.MULTILINE | re.DOTALL)]
def wait_for_completion(client, iid, timeout=300):
progress, status, elapsed = st.progress(0), st.empty(), 0
while elapsed < timeout:
interaction = client.interactions.get(iid)
if interaction.status != "in_progress": progress.progress(100); return interaction
elapsed += 3; progress.progress(min(90, int(elapsed/timeout*100))); status.text(f"⏳ {elapsed}s..."); time.sleep(3)
return client.interactions.get(iid)
# Setup
st.set_page_config(page_title="Research Planner", page_icon="🔬", layout="wide")
st.title("🔬 AI Research Planner & Executor Agent (Gemini Interactions API) ✨")
for k in ["plan_id", "plan_text", "tasks", "research_id", "research_text", "synthesis_text", "infographic"]:
if k not in st.session_state: st.session_state[k] = [] if k == "tasks" else None
with st.sidebar:
api_key = st.text_input("🔑 Google API Key", type="password")
if st.button("Reset"): [setattr(st.session_state, k, [] if k == "tasks" else None) for k in ["plan_id", "plan_text", "tasks", "research_id", "research_text", "synthesis_text", "infographic"]]; st.rerun()
st.markdown("""
### How It Works
1. **Plan** → Gemini 3 Flash creates research tasks
2. **Select** → Choose which tasks to research
3. **Research** → Deep Research Agent investigates
4. **Synthesize** → Gemini 3 Pro writes report + TL;DR infographic
Each phase chains via `previous_interaction_id` for context.
""")
client = genai.Client(api_key=api_key) if api_key else None
if not client: st.info("👆 Enter API key to start"); st.stop()
# Phase 1: Plan
research_goal = st.text_area("📝 Research Goal", placeholder="e.g., Research B2B HR SaaS market in Germany")
if st.button("📋 Generate Plan", disabled=not research_goal, type="primary"):
with st.spinner("Planning..."):
try:
i = client.interactions.create(model="gemini-3-flash-preview", input=f"Create a numbered research plan for: {research_goal}\n\nFormat: 1. [Task] - [Details]\n\nInclude 5-8 specific tasks.", tools=[{"type": "google_search"}], store=True)
st.session_state.plan_id, st.session_state.plan_text, st.session_state.tasks = i.id, get_text(i.outputs), parse_tasks(get_text(i.outputs))
except Exception as e: st.error(f"Error: {e}")
# Phase 2: Select & Research
if st.session_state.plan_text:
st.divider(); st.subheader("🔍 Select Tasks & Research")
selected = [f"{t['num']}. {t['text']}" for t in st.session_state.tasks if st.checkbox(f"**{t['num']}.** {t['text']}", True, key=f"t{t['num']}")]
st.caption(f"✅ {len(selected)}/{len(st.session_state.tasks)} selected")
if st.button("🚀 Start Deep Research", type="primary", disabled=not selected):
with st.spinner("Researching (2-5 min)..."):
try:
i = client.interactions.create(agent="deep-research-pro-preview-12-2025", input=f"Research these tasks thoroughly with sources:\n\n" + "\n\n".join(selected), previous_interaction_id=st.session_state.plan_id, background=True, store=True)
i = wait_for_completion(client, i.id)
st.session_state.research_id, st.session_state.research_text = i.id, get_text(i.outputs) or f"Status: {i.status}"
st.rerun()
except Exception as e: st.error(f"Error: {e}")
if st.session_state.research_text:
st.divider(); st.subheader("📄 Research Results"); st.markdown(st.session_state.research_text)
# Phase 3: Synthesis + Infographic
if st.session_state.research_id:
if st.button("📊 Generate Executive Report", type="primary"):
with st.spinner("Synthesizing report..."):
try:
i = client.interactions.create(model="gemini-3-pro-preview", input=f"Create executive report with Summary, Findings, Recommendations, Risks:\n\n{st.session_state.research_text}", previous_interaction_id=st.session_state.research_id, store=True)
st.session_state.synthesis_text = get_text(i.outputs)
except Exception as e: st.error(f"Error: {e}"); st.stop()
with st.spinner("Creating TL;DR infographic..."):
try:
response = client.models.generate_content(
model="gemini-3-pro-image-preview",
contents=f"Create a whiteboard summary infographic for the following: {st.session_state.synthesis_text}"
)
for part in response.candidates[0].content.parts:
if hasattr(part, 'inline_data') and part.inline_data:
st.session_state.infographic = part.inline_data.data
break
except Exception as e: st.warning(f"Infographic error: {e}")
st.rerun()
if st.session_state.synthesis_text:
st.divider(); st.markdown("## 📊 Executive Report")
# TL;DR Infographic at the top
if st.session_state.infographic:
st.markdown("### 🎨 TL;DR")
st.image(st.session_state.infographic, use_container_width=True)
st.divider()
st.markdown(st.session_state.synthesis_text)
st.download_button("📥 Download Report", st.session_state.synthesis_text, "research_report.md", "text/markdown")
st.divider(); st.caption("[Gemini Interactions API](https://ai.google.dev/gemini-api/docs/interactions)") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_customer_support_agent/customer_support_agent.py | advanced_ai_agents/single_agent_apps/ai_customer_support_agent/customer_support_agent.py | import streamlit as st
from openai import OpenAI
from mem0 import Memory
import os
import json
from datetime import datetime, timedelta
# Set up the Streamlit App
st.title("AI Customer Support Agent with Memory 🛒")
st.caption("Chat with a customer support assistant who remembers your past interactions.")
# Set the OpenAI API key
openai_api_key = st.text_input("Enter OpenAI API Key", type="password")
if openai_api_key:
os.environ['OPENAI_API_KEY'] = openai_api_key
class CustomerSupportAIAgent:
    """Customer-support agent that augments GPT-4 answers with long-term
    memory stored in Mem0 (Qdrant vector store).

    Memories are keyed per user_id, so each customer gets an isolated
    conversation history; every stored entry is tagged with this app's id.
    """

    def __init__(self):
        """Connect to a local Qdrant instance and create the OpenAI client.

        Halts the Streamlit app (st.stop) if the memory backend cannot be
        initialized — nothing else in the app works without it.
        """
        # Initialize Mem0 with Qdrant as the vector store
        config = {
            "vector_store": {
                "provider": "qdrant",
                "config": {
                    "host": "localhost",  # assumes a Qdrant server running locally — TODO confirm
                    "port": 6333,
                }
            },
        }
        try:
            self.memory = Memory.from_config(config)
        except Exception as e:
            st.error(f"Failed to initialize memory: {e}")
            st.stop()  # Stop execution if memory initialization fails
        self.client = OpenAI()  # reads OPENAI_API_KEY from the environment
        self.app_id = "customer-support"  # tag for memories written by this app

    def handle_query(self, query, user_id=None):
        """Answer one customer query.

        Looks up semantically relevant past memories for ``user_id``,
        prepends them as context, asks GPT-4, then writes both the query
        and the answer back into memory.

        Returns the assistant's answer string, or a canned apology on error.
        """
        try:
            # Search for relevant memories
            relevant_memories = self.memory.search(query=query, user_id=user_id)
            # Build context from relevant memories
            context = "Relevant past information:\n"
            if relevant_memories and "results" in relevant_memories:
                for memory in relevant_memories["results"]:
                    if "memory" in memory:
                        context += f"- {memory['memory']}\n"
            # Generate a response using OpenAI
            full_prompt = f"{context}\nCustomer: {query}\nSupport Agent:"
            response = self.client.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": "You are a customer support AI agent for TechGadgets.com, an online electronics store."},
                    {"role": "user", "content": full_prompt}
                ]
            )
            answer = response.choices[0].message.content
            # Add the query and answer to memory
            self.memory.add(query, user_id=user_id, metadata={"app_id": self.app_id, "role": "user"})
            self.memory.add(answer, user_id=user_id, metadata={"app_id": self.app_id, "role": "assistant"})
            return answer
        except Exception as e:
            st.error(f"An error occurred while handling the query: {e}")
            return "Sorry, I encountered an error. Please try again later."

    def get_memories(self, user_id=None):
        """Return all stored memories for ``user_id`` (backend-shaped dict),
        or None if retrieval fails."""
        try:
            # Retrieve all memories for a user
            return self.memory.get_all(user_id=user_id)
        except Exception as e:
            st.error(f"Failed to retrieve memories: {e}")
            return None

    def generate_synthetic_data(self, user_id: str) -> dict | None:
        """Ask GPT-4 to invent a plausible customer profile/order history for
        ``user_id``, seed it into memory, and return the parsed profile dict.

        Returns None on any failure (API error, non-JSON model output, ...).
        """
        try:
            today = datetime.now()
            order_date = (today - timedelta(days=10)).strftime("%B %d, %Y")
            expected_delivery = (today + timedelta(days=2)).strftime("%B %d, %Y")
            prompt = f"""Generate a detailed customer profile and order history for a TechGadgets.com customer with ID {user_id}. Include:
1. Customer name and basic info
2. A recent order of a high-end electronic device (placed on {order_date}, to be delivered by {expected_delivery})
3. Order details (product, price, order number)
4. Customer's shipping address
5. 2-3 previous orders from the past year
6. 2-3 customer service interactions related to these orders
7. Any preferences or patterns in their shopping behavior
Format the output as a JSON object."""
            response = self.client.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": "You are a data generation AI that creates realistic customer profiles and order histories. Always respond with valid JSON."},
                    {"role": "user", "content": prompt}
                ]
            )
            # NOTE(review): the model is not forced into JSON mode; json.loads
            # raises (and is caught below) if the reply includes prose/fences.
            customer_data = json.loads(response.choices[0].message.content)
            # Add generated data to memory
            for key, value in customer_data.items():
                if isinstance(value, list):
                    # Store each list item (orders, interactions, ...) as its
                    # own memory entry so they are individually searchable.
                    for item in value:
                        self.memory.add(
                            json.dumps(item),
                            user_id=user_id,
                            metadata={"app_id": self.app_id, "role": "system"}
                        )
                else:
                    self.memory.add(
                        f"{key}: {json.dumps(value)}",
                        user_id=user_id,
                        metadata={"app_id": self.app_id, "role": "system"}
                    )
            return customer_data
        except Exception as e:
            st.error(f"Failed to generate synthetic data: {e}")
            return None
# ---------------------------------------------------------------------------
# Streamlit UI: customer selection, synthetic-data tools, and the chat loop.
# Runs on every rerun; all cross-run state lives in st.session_state.
# ---------------------------------------------------------------------------

# Initialize the CustomerSupportAIAgent
support_agent = CustomerSupportAIAgent()

# Sidebar for customer ID and memory view
st.sidebar.title("Enter your Customer ID:")
previous_customer_id = st.session_state.get("previous_customer_id", None)
customer_id = st.sidebar.text_input("Enter your Customer ID")

# Switching customers resets the conversation and any generated profile.
if customer_id != previous_customer_id:
    st.session_state.messages = []
    st.session_state.previous_customer_id = customer_id
    st.session_state.customer_data = None

# Add button to generate synthetic data
if st.sidebar.button("Generate Synthetic Data"):
    if customer_id:
        with st.spinner("Generating customer data..."):
            st.session_state.customer_data = support_agent.generate_synthetic_data(customer_id)
        if st.session_state.customer_data:
            st.sidebar.success("Synthetic data generated successfully!")
        else:
            st.sidebar.error("Failed to generate synthetic data.")
    else:
        st.sidebar.error("Please enter a customer ID first.")

if st.sidebar.button("View Customer Profile"):
    # .get guards against the key not existing yet in a fresh session.
    if st.session_state.get("customer_data"):
        st.sidebar.json(st.session_state.customer_data)
    else:
        st.sidebar.info("No customer data generated yet. Click 'Generate Synthetic Data' first.")

if st.sidebar.button("View Memory Info"):
    if customer_id:
        memories = support_agent.get_memories(user_id=customer_id)
        if memories:
            st.sidebar.write(f"Memory for customer **{customer_id}**:")
            if "results" in memories:
                for memory in memories["results"]:
                    if "memory" in memory:
                        # Fix: render entries in the sidebar next to their
                        # header (was st.write, which put them in the main pane).
                        st.sidebar.write(f"- {memory['memory']}")
        else:
            st.sidebar.info("No memory found for this customer ID.")
    else:
        st.sidebar.error("Please enter a customer ID to view memory info.")

# Initialize the chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display the chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
query = st.chat_input("How can I assist you today?")

if query and customer_id:
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": query})
    with st.chat_message("user"):
        st.markdown(query)
    # Generate and display response
    with st.spinner("Generating response..."):
        answer = support_agent.handle_query(query, user_id=customer_id)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": answer})
    with st.chat_message("assistant"):
        st.markdown(answer)
elif not customer_id:
    st.error("Please enter a customer ID to start the chat.")
else:
st.warning("Please enter your OpenAI API key to use the customer support agent.") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
# --- File: advanced_ai_agents/single_agent_apps/ai_movie_production_agent/movie_production_agent.py ---
# Import the required libraries
import streamlit as st
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.team import Team
from agno.tools.serpapi import SerpApiTools
from agno.models.google import Gemini
from textwrap import dedent
# ---------------------------------------------------------------------------
# AI Movie Production Agent — Streamlit front end.
# A producer Team coordinates two Agno member agents (script writer and
# casting director) to turn a one-line movie idea into a concept overview.
# ---------------------------------------------------------------------------

# Set up the Streamlit app
st.title("AI Movie Production Agent 🎬")
st.caption("Bring your movie ideas to life with the teams of script writing and casting AI agents")

# Get Google API key from user
google_api_key = st.text_input("Enter Google API Key to access Gemini 2.5 Flash", type="password")
# Get SerpAPI key from the user
serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password")

# The whole app is gated on both keys being supplied.
if google_api_key and serp_api_key:
    # Member agent 1: turns an idea + genre into a structured script outline.
    script_writer = Agent(
        name="ScriptWriter",
        model=Gemini(id="gemini-2.5-flash", api_key=google_api_key),
        description=dedent(
            """\
You are an expert screenplay writer. Given a movie idea and genre,
develop a compelling script outline with character descriptions and key plot points.
"""
        ),
        instructions=[
            "Write a script outline with 3-5 main characters and key plot points.",
            "Outline the three-act structure and suggest 2-3 twists.",
            "Ensure the script aligns with the specified genre and target audience.",
        ],
    )
    # Member agent 2: proposes casting; can check actors via SerpAPI search.
    casting_director = Agent(
        name="CastingDirector",
        model=Gemini(id="gemini-2.5-flash", api_key=google_api_key),
        description=dedent(
            """\
You are a talented casting director. Given a script outline and character descriptions,
suggest suitable actors for the main roles, considering their past performances and current availability.
"""
        ),
        instructions=[
            "Suggest 2-3 actors for each main role.",
            "Check actors' current status using `search_google`.",
            "Provide a brief explanation for each casting suggestion.",
            "Consider diversity and representation in your casting choices.",
        ],
        tools=[SerpApiTools(api_key=serp_api_key)],
    )
    # Coordinator: delegates to the two members and merges their output.
    movie_producer = Team(
        name="MovieProducer",
        model=Gemini(id="gemini-2.5-flash", api_key=google_api_key),
        members=[script_writer, casting_director],
        description="Experienced movie producer overseeing script and casting.",
        instructions=[
            "Ask ScriptWriter for a script outline based on the movie idea.",
            "Pass the outline to CastingDirector for casting suggestions.",
            "Summarize the script outline and casting suggestions.",
            "Provide a concise movie concept overview.",
        ],
        markdown=True,
    )

    # Input field for the report query
    movie_idea = st.text_area("Describe your movie idea in a few sentences:")
    genre = st.selectbox("Select the movie genre:",
                         ["Action", "Comedy", "Drama", "Sci-Fi", "Horror", "Romance", "Thriller"])
    target_audience = st.selectbox("Select the target audience:",
                                   ["General", "Children", "Teenagers", "Adults", "Mature"])
    estimated_runtime = st.slider("Estimated runtime (in minutes):", 60, 180, 120)

    # Process the movie concept
    if st.button("Develop Movie Concept"):
        with st.spinner("Developing movie concept..."):
            input_text = (
                f"Movie idea: {movie_idea}, Genre: {genre}, "
                f"Target audience: {target_audience}, Estimated runtime: {estimated_runtime} minutes"
            )
            # Get the response from the assistant (blocking, non-streamed run)
            response: RunOutput = movie_producer.run(input_text, stream=False)
            st.write(response.content)
# --- File: advanced_ai_agents/single_agent_apps/ai_email_gtm_reachout_agent/ai_email_gtm_reachout.py ---
import json
import os
import streamlit as st
from datetime import datetime
from textwrap import dedent
from typing import Dict, Iterator, List, Optional, Literal
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.db.sqlite import SqliteDb
from agno.tools.exa import ExaTools
from agno.utils.log import logger
from agno.utils.pprint import pprint_run_response
from agno.workflow import Workflow
from pydantic import BaseModel, Field
# ---------------------------------------------------------------------------
# API keys, runtime flags and example input data for the outreach workflow.
# ---------------------------------------------------------------------------

# Initialize API keys from environment or empty defaults
if 'EXA_API_KEY' not in st.session_state:
    st.session_state.EXA_API_KEY = os.getenv("EXA_API_KEY", "")
if 'OPENAI_API_KEY' not in st.session_state:
    st.session_state.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")

# Set environment variables
# NOTE(review): the Agent class attributes defined later read
# os.environ["EXA_API_KEY"] at class-definition time, so these assignments
# must run first; an empty session value overwrites any real env var with "".
os.environ["EXA_API_KEY"] = st.session_state.EXA_API_KEY
os.environ["OPENAI_API_KEY"] = st.session_state.OPENAI_API_KEY

# Demo mode
# - set to True to print email to console
# - set to False to send to yourself
DEMO_MODE = True

# Today's date (YYYY-MM-DD). Not referenced in the code visible in this
# chunk — presumably used for email date stamps; TODO confirm.
today = datetime.now().strftime("%Y-%m-%d")

# Example leads - Replace with your actual targets
leads: Dict[str, Dict[str, str]] = {
    "Notion": {
        "name": "Notion",
        "website": "https://www.notion.so",
        "contact_name": "Ivan Zhao",
        "position": "CEO",
    },
    # Add more companies as needed
}

# Updated sender details for an AI analytics company
# Passed into the workflow as sender_details and merged into the email context.
sender_details_dict: Dict[str, str] = {
    "name": "Sarah Chen",
    "email": "your.email@company.com",  # Your email goes here
    "organization": "Data Consultants Inc",
    "service_offered": "We help build data products and offer data consulting services",
    "calendar_link": "https://calendly.com/data-consultants-inc",
    "linkedin": "https://linkedin.com/in/your-profile",
    "phone": "+1 (555) 123-4567",
    "website": "https://www.data-consultants.com",
}
# Email skeletons keyed by target department, then by offering type.
# [BRACKETED] placeholders are filled in by the email_creator agent, which
# receives the chosen template inside its JSON context.
# NOTE(review): the "Human Resources" sub-dict looks copy-pasted — its
# "Consulting Services" copy reads like a partnership pitch, and
# "Investment Opportunity" is not a valid OutreachConfig.service_type, so
# that template is unreachable via the workflow's lookup. Verify intent.
DEPARTMENT_TEMPLATES = {
    "GTM (Sales & Marketing)": {
        "Software Solution": """\
Hey [RECIPIENT_NAME],
I noticed [COMPANY_NAME]'s impressive [GTM_INITIATIVE] and your role in scaling [SPECIFIC_ACHIEVEMENT]. Your approach to [SALES_STRATEGY] caught my attention.
[PRODUCT_VALUE_FOR_GTM]
[GTM_SPECIFIC_BENEFIT]
Would love to show you how this could work for your team: [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Consulting Services": """\
Hey [RECIPIENT_NAME],
Your team's recent success with [CAMPAIGN_NAME] is impressive, particularly the [SPECIFIC_METRIC].
[CONSULTING_VALUE_PROP]
[GTM_IMPROVEMENT_POTENTIAL]
Here's my calendar if you'd like to explore this: [CALENDAR_LINK]
Best,
[SIGNATURE]\
"""
    },
    "Human Resources": {
        "Software Solution": """\
Hey [RECIPIENT_NAME],
I've been following [COMPANY_NAME]'s growth and noticed your focus on [HR_INITIATIVE]. Your approach to [SPECIFIC_HR_PROGRAM] stands out.
[HR_TOOL_VALUE_PROP]
[HR_SPECIFIC_BENEFIT]
Would you be open to seeing how this could help your HR initiatives? [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Consulting Services": """\
Hey [RECIPIENT_NAME],
I've been following [COMPANY_NAME]'s journey in [INDUSTRY], and your recent [ACHIEVEMENT] caught my attention. Your approach to [SPECIFIC_FOCUS] aligns perfectly with what we're building.
[PARTNERSHIP_VALUE_PROP]
[MUTUAL_BENEFIT]
Would love to explore potential synergies over a quick call: [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Investment Opportunity": """\
Hey [RECIPIENT_NAME],
Your work at [COMPANY_NAME] in [SPECIFIC_FOCUS] is impressive, especially [RECENT_ACHIEVEMENT].
[INVESTMENT_THESIS]
[UNIQUE_VALUE_ADD]
Here's my calendar if you'd like to discuss: [CALENDAR_LINK]
Best,
[SIGNATURE]\
"""
    },
    "Marketing Professional": {
        "Product Demo": """\
Hey [RECIPIENT_NAME],
I noticed [COMPANY_NAME]'s recent [MARKETING_INITIATIVE] and was impressed by [SPECIFIC_DETAIL].
[PRODUCT_VALUE_PROP]
[BENEFIT_TO_MARKETING]
Would you be open to a quick demo? Here's my calendar: [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Service Offering": """\
Hey [RECIPIENT_NAME],
Saw your team's work on [RECENT_CAMPAIGN] - great execution on [SPECIFIC_ELEMENT].
[SERVICE_VALUE_PROP]
[MARKETING_BENEFIT]
Here's my calendar if you'd like to explore this: [CALENDAR_LINK]
Best,
[SIGNATURE]\
"""
    },
    "B2B Sales Representative": {
        "Product Demo": """\
Hey [RECIPIENT_NAME],
Noticed your team at [COMPANY_NAME] is scaling [SALES_FOCUS]. Your approach to [SPECIFIC_STRATEGY] is spot-on.
[PRODUCT_VALUE_PROP]
[SALES_BENEFIT]
Would you be interested in seeing how this works? Here's my calendar: [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Service Offering": """\
Hey [RECIPIENT_NAME],
Your sales team's success with [RECENT_WIN] caught my attention. Particularly impressed by [SPECIFIC_ACHIEVEMENT].
[SERVICE_VALUE_PROP]
[SALES_IMPROVEMENT]
Here's my calendar if you'd like to discuss: [CALENDAR_LINK]
Best,
[SIGNATURE]\
"""
    }
}
# Target-market presets for the Streamlit category picker: each entry maps a
# market segment to a short description (shown in the UI) and the
# decision-maker roles the contact_finder agent should look for.
COMPANY_CATEGORIES = {
    "SaaS/Technology Companies": {
        "description": "Software, cloud services, and tech platforms",
        "typical_roles": ["CTO", "Head of Engineering", "VP of Product", "Engineering Manager", "Tech Lead"]
    },
    "E-commerce/Retail": {
        "description": "Online retail, marketplaces, and D2C brands",
        "typical_roles": ["Head of Digital", "E-commerce Manager", "Marketing Director", "Operations Head"]
    },
    "Financial Services": {
        "description": "Banks, fintech, insurance, and investment firms",
        "typical_roles": ["CFO", "Head of Innovation", "Risk Manager", "Product Manager"]
    },
    "Healthcare/Biotech": {
        "description": "Healthcare providers, biotech, and health tech",
        "typical_roles": ["Medical Director", "Head of R&D", "Clinical Manager", "Healthcare IT Lead"]
    },
    "Manufacturing/Industrial": {
        "description": "Manufacturing, industrial automation, and supply chain",
        "typical_roles": ["Operations Director", "Plant Manager", "Supply Chain Head", "Quality Manager"]
    }
}
class OutreachConfig(BaseModel):
    """Configuration for email outreach.

    Collected from the Streamlit form and passed to
    PersonalisedEmailGenerator.run() to drive company discovery,
    contact research and template selection.
    """
    company_category: str = Field(..., description="Type of companies to target")
    # The first entry is used to select the email template in the workflow.
    target_departments: List[str] = Field(
        ...,
        description="Departments to target (e.g., GTM, HR, Engineering)"
    )
    service_type: Literal[
        "Software Solution",
        "Consulting Services",
        "Professional Services",
        "Technology Platform",
        "Custom Development"
    ] = Field(..., description="Type of service being offered")
    company_size_preference: Literal["Startup (1-50)", "SMB (51-500)", "Enterprise (500+)", "All Sizes"] = Field(
        default="All Sizes",
        description="Preferred company size"
    )
    personalization_level: Literal["Basic", "Medium", "Deep"] = Field(
        default="Deep",
        description="Level of personalization"
    )
class ContactInfo(BaseModel):
    """A single decision-maker contact discovered during prospecting.

    Name, title and company are required; the remaining fields are
    best-effort data the research agents may or may not find.
    """

    name: str = Field(description="Contact's full name")
    title: str = Field(description="Job title/position")
    email: Optional[str] = Field(default=None, description="Email address")
    linkedin: Optional[str] = Field(default=None, description="LinkedIn profile URL")
    company: str = Field(description="Company name")
    department: Optional[str] = Field(default=None, description="Department")
    background: Optional[str] = Field(default=None, description="Professional background")
class CompanyInfo(BaseModel):
    """
    Stores in-depth data about a company gathered during the research phase.

    Only company_name and website_url are required; the workflow currently
    fills most fields with placeholders and relies on the research agent's
    free-text output for personalization.
    """
    # Basic Information
    company_name: str = Field(..., description="Company name")
    website_url: str = Field(..., description="Company website URL")
    # Business Details
    industry: Optional[str] = Field(None, description="Primary industry")
    core_business: Optional[str] = Field(None, description="Main business focus")
    business_model: Optional[str] = Field(None, description="B2B, B2C, etc.")
    # Marketing Information
    motto: Optional[str] = Field(None, description="Company tagline/slogan")
    value_proposition: Optional[str] = Field(None, description="Main value proposition")
    target_audience: Optional[List[str]] = Field(
        None, description="Target customer segments"
    )
    # Company Metrics
    company_size: Optional[str] = Field(None, description="Employee count range")
    founded_year: Optional[int] = Field(None, description="Year founded")
    locations: Optional[List[str]] = Field(None, description="Office locations")
    # Technical Details
    technologies: Optional[List[str]] = Field(None, description="Technology stack")
    integrations: Optional[List[str]] = Field(None, description="Software integrations")
    # Market Position
    competitors: Optional[List[str]] = Field(None, description="Main competitors")
    unique_selling_points: Optional[List[str]] = Field(
        None, description="Key differentiators"
    )
    market_position: Optional[str] = Field(None, description="Market positioning")
    # Social Proof
    customers: Optional[List[str]] = Field(None, description="Notable customers")
    case_studies: Optional[List[str]] = Field(None, description="Success stories")
    awards: Optional[List[str]] = Field(None, description="Awards and recognition")
    # Recent Activity
    recent_news: Optional[List[str]] = Field(None, description="Recent news/updates")
    blog_topics: Optional[List[str]] = Field(None, description="Recent blog topics")
    # Pain Points & Opportunities
    challenges: Optional[List[str]] = Field(None, description="Potential pain points")
    growth_areas: Optional[List[str]] = Field(None, description="Growth opportunities")
    # Contact Information
    email_address: Optional[str] = Field(None, description="Contact email")
    phone: Optional[str] = Field(None, description="Contact phone")
    social_media: Optional[Dict[str, str]] = Field(
        None, description="Social media links"
    )
    # Additional Fields
    pricing_model: Optional[str] = Field(None, description="Pricing strategy and tiers")
    user_base: Optional[str] = Field(None, description="Estimated user base size")
    key_features: Optional[List[str]] = Field(None, description="Main product features")
    integration_ecosystem: Optional[List[str]] = Field(
        None, description="Integration partners"
    )
    funding_status: Optional[str] = Field(
        None, description="Latest funding information"
    )
    growth_metrics: Optional[Dict[str, str]] = Field(
        None, description="Key growth indicators"
    )
class PersonalisedEmailGenerator(Workflow):
    """
    Automated B2B outreach system that:
    1. Discovers companies using Exa search based on criteria
    2. Finds contact details for decision makers at those companies
    3. Researches company details and pain points
    4. Generates personalized cold emails for B2B outreach

    This workflow is designed to automate the entire prospecting process
    from company discovery to personalized email generation.

    run() is a generator: it yields progress dicts consumed by the
    Streamlit UI rather than returning a single result.
    """

    # Human-readable workflow banner; not used for control flow.
    description: str = dedent("""\
AI-Powered B2B Outreach Workflow:
--------------------------------------------------------
1. Discover Target Companies (Exa Search)
2. Find Decision Maker Contacts
3. Research Company Intelligence
4. Generate Personalized Emails
--------------------------------------------------------
Fully automated prospecting pipeline for B2B outreach.
""")

    # Step-1 agent: web-searches for candidate companies matching the config.
    # NOTE(review): os.environ["EXA_API_KEY"] is read at class-definition
    # time (KeyError if unset); module-level setup assigns it beforehand.
    company_finder: Agent = Agent(
        model=OpenAIChat(id="gpt-5"),
        tools=[ExaTools(api_key=os.environ["EXA_API_KEY"])],
        description="Expert at finding companies that match specific criteria using web search",
        instructions=dedent("""\
You are a company discovery specialist. Your job is to find companies that match the given criteria.
Search for companies based on:
- Industry/sector
- Company size
- Geographic location
- Business model
- Technology stack
- Recent funding/growth
For each company found, provide:
- Company name
- Website URL
- Brief description
- Industry
- Estimated size
- Location
Focus on finding companies that would be good prospects for the specified service offering.
Look for companies showing signs of growth, funding, or expansion.
"""),
    )

    # Step-2 agent: locates decision makers and their contact details.
    contact_finder: Agent = Agent(
        model=OpenAIChat(id="gpt-5"),
        tools=[ExaTools(api_key=os.environ["EXA_API_KEY"])],
        description="Expert at finding contact information for decision makers at companies",
        instructions=dedent("""\
You are a contact research specialist. Find decision makers and their contact information.
For each company, search for:
- Key decision makers in target departments
- Their email addresses
- LinkedIn profiles
- Professional backgrounds
- Current role and responsibilities
Focus on finding people in roles like:
- CEO, CTO, VP of Engineering (for tech solutions)
- CMO, VP Marketing, Growth Lead (for marketing solutions)
- VP Sales, Sales Director (for sales solutions)
- HR Director, People Ops (for HR solutions)
Provide verified contact information when possible.
"""),
    )

    # Step-3 agent: gathers company intelligence used for personalization.
    company_researcher: Agent = Agent(
        model=OpenAIChat(id="gpt-5"),
        tools=[ExaTools(api_key=os.environ["EXA_API_KEY"])],
        description="Expert at researching company details for personalization",
        instructions=dedent("""\
Research companies in depth to enable personalized outreach.
Analyze:
- Company website and messaging
- Recent news and updates
- Product/service offerings
- Technology stack
- Growth indicators
- Pain points and challenges
- Recent achievements
- Market position
Focus on insights that would be relevant for B2B outreach:
- Scaling challenges
- Technology needs
- Market expansion
- Competitive positioning
- Recent wins or milestones
"""),
    )

    # Step-4 agent: writes the final email from the JSON context (no tools).
    email_creator: Agent = Agent(
        model=OpenAIChat(id="gpt-5"),
        description=dedent("""\
You are writing for a friendly, empathetic 20-year-old sales rep whose
style is cool, concise, and respectful. Tone is casual yet professional.
- Be polite but natural, using simple language.
- Never sound robotic or use big cliché words like "delve", "synergy" or "revolutionary."
- Clearly address problems the prospect might be facing and how we solve them.
- Keep paragraphs short and friendly, with a natural voice.
- End on a warm, upbeat note, showing willingness to help.\
"""),
        instructions=dedent("""\
Please craft a highly personalized email that has:
1. A simple, personal subject line referencing the problem or opportunity.
2. At least one area for improvement or highlight from research.
3. A quick explanation of how we can help them (no heavy jargon).
4. References a known challenge from the research.
5. Avoid words like "delve", "explore", "synergy", "amplify", "game changer", "revolutionary", "breakthrough".
6. Use first-person language ("I") naturally.
7. Maintain a 20-year-old's friendly style—brief and to the point.
8. Avoid placing the recipient's name in the subject line.
Use the appropriate template based on the target professional type and outreach purpose.
Ensure the final tone feels personal and conversation-like, not automatically generated.
----------------------------------------------------------------------
"""),
    )

    def get_cached_data(self, cache_key: str) -> Optional[dict]:
        """Retrieve cached data.

        NOTE(review): not called from run() — dead code unless used by
        external callers. TODO confirm.
        """
        logger.info(f"Checking cache for: {cache_key}")
        return self.session_state.get("cache", {}).get(cache_key)

    def cache_data(self, cache_key: str, data: dict):
        """Cache data under ``cache_key`` in the workflow session state.

        NOTE(review): also unused by run(); write_to_storage() is not
        defined in this class — presumably inherited from Workflow.
        Verify it exists in the installed agno version.
        """
        logger.info(f"Caching data for: {cache_key}")
        self.session_state.setdefault("cache", {})
        self.session_state["cache"][cache_key] = data
        self.write_to_storage()

    def run(
        self,
        config: OutreachConfig,
        sender_details: Dict[str, str],
        num_companies: int = 5,
        use_cache: bool = True,
    ) -> Iterator[Dict]:
        """
        Automated B2B outreach workflow:
        1. Discover companies using Exa search based on criteria
        2. Find decision maker contacts for each company
        3. Research company details for personalization
        4. Generate personalized emails

        Yields progress dicts ({step, progress, status}) and, per completed
        company, a dict additionally carrying company_name / email /
        company_data / contacts.

        NOTE(review): use_cache is accepted but never consulted — TODO
        confirm intended caching behavior.
        """
        logger.info("Starting automated B2B outreach workflow...")

        # Step 1: Discover companies
        logger.info("🔍 Discovering target companies...")
        search_query = f"""
Find {num_companies} {config.company_category} companies that would be good prospects for {config.service_type}.
Company criteria:
- Industry: {config.company_category}
- Size: {config.company_size_preference}
- Target departments: {', '.join(config.target_departments)}
Look for companies showing growth, recent funding, or expansion.
"""
        companies_response = self.company_finder.run(search_query)
        if not companies_response or not companies_response.content:
            logger.error("No companies found")
            return

        # Parse companies from response
        # The full free-text list is re-fed to later agents, which extract
        # the i-th company themselves.
        companies_text = companies_response.content
        logger.info(f"Found companies: {companies_text[:200]}...")

        # Step 2: For each company, find contacts and research
        for i in range(num_companies):
            try:
                logger.info(f"Processing company #{i+1}")
                # Yield progress update
                yield {
                    "step": f"Processing company {i+1}/{num_companies}",
                    "progress": (i + 0.2) / num_companies,
                    "status": "Finding contacts..."
                }

                # Extract company info from the response
                # NOTE(review): company_search is built but never used — dead code.
                company_search = f"Extract company #{i+1} details from: {companies_text}"

                # Step 3: Find decision maker contacts
                logger.info("👥 Finding decision maker contacts...")
                contacts_query = f"""
Find decision makers at company #{i+1} from this list: {companies_text}
Focus on roles in: {', '.join(config.target_departments)}
Find their email addresses and LinkedIn profiles.
"""
                contacts_response = self.contact_finder.run(contacts_query)
                if not contacts_response or not contacts_response.content:
                    logger.warning(f"No contacts found for company #{i+1}")
                    continue

                # Yield progress update
                yield {
                    "step": f"Processing company {i+1}/{num_companies}",
                    "progress": (i + 0.4) / num_companies,
                    "status": "Researching company..."
                }

                # Step 4: Research company details
                logger.info("🔬 Researching company details...")
                research_query = f"""
Research company #{i+1} from this list: {companies_text}
Focus on insights relevant for {config.service_type} outreach.
Find pain points related to {', '.join(config.target_departments)}.
"""
                research_response = self.company_researcher.run(research_query)
                if not research_response or not research_response.content:
                    logger.warning(f"No research data for company #{i+1}")
                    continue

                # Parse the research response content
                research_content = research_response.content
                # NOTE(review): redundant — the guard above already ensured
                # research_response.content is truthy.
                if not research_content:
                    logger.warning(f"No research data for company #{i+1}")
                    continue

                # Create a basic company info structure from the research
                company_data = CompanyInfo(
                    company_name=f"Company #{i+1}",  # Will be updated with actual name
                    website_url="",  # Will be updated with actual URL
                    industry="Unknown",
                    core_business=research_content[:200] if research_content else "No data available"
                )

                # Yield progress update
                yield {
                    "step": f"Processing company {i+1}/{num_companies}",
                    "progress": (i + 0.6) / num_companies,
                    "status": "Generating email..."
                }

                # Step 5: Generate personalized email
                logger.info("✉️ Generating personalized email...")

                # Get appropriate template based on target departments;
                # unknown department/service combinations fall back to the
                # generic GTM software template.
                template_dept = config.target_departments[0] if config.target_departments else "GTM (Sales & Marketing)"
                if template_dept in DEPARTMENT_TEMPLATES and config.service_type in DEPARTMENT_TEMPLATES[template_dept]:
                    template = DEPARTMENT_TEMPLATES[template_dept][config.service_type]
                else:
                    template = DEPARTMENT_TEMPLATES["GTM (Sales & Marketing)"]["Software Solution"]

                # Everything the email writer needs, serialized as one JSON blob.
                email_context = json.dumps(
                    {
                        "template": template,
                        "company_info": company_data.model_dump(),
                        "contacts_info": contacts_response.content,
                        "sender_details": sender_details,
                        "target_departments": config.target_departments,
                        "service_type": config.service_type,
                        "personalization_level": config.personalization_level
                    },
                    indent=4,
                )
                email_response = self.email_creator.run(
                    f"Generate a personalized email using this context:\n{email_context}"
                )
                if not email_response or not email_response.content:
                    logger.warning(f"No email generated for company #{i+1}")
                    continue

                # Final per-company payload consumed by the Streamlit UI.
                yield {
                    "company_name": company_data.company_name,
                    "email": email_response.content,
                    "company_data": company_data.model_dump(),
                    "contacts": contacts_response.content,
                    "step": f"Company {i+1}/{num_companies} completed",
                    "progress": (i + 1) / num_companies,
                    "status": "Completed"
                }
            except Exception as e:
                # Keep going — one failed company must not abort the batch.
                logger.error(f"Error processing company #{i+1}: {e}")
                continue
def create_streamlit_ui():
    """Render the four-step configuration UI and collect campaign inputs.

    Returns:
        tuple: (OutreachConfig, sender_details dict, num_companies int).

    Note: validation failures call st.stop(), which halts rendering of the
    rest of the Streamlit script for that run (the caller never sees a
    partially-filled configuration).
    """
    st.title("🚀 Automated B2B Email Outreach Generator")
    st.markdown("""
    **Fully automated prospecting pipeline**: Discovers companies, finds decision makers, 
    and generates personalized emails using AI research agents.
    """)
    # Step 1: Target Company Category Selection
    st.header("1️⃣ Target Company Discovery")
    col1, col2 = st.columns([2, 1])
    with col1:
        selected_category = st.selectbox(
            "What type of companies should we target?",
            options=list(COMPANY_CATEGORIES.keys()),
            key="company_category"
        )
        # Show the category blurb and the decision-maker roles defined in
        # the COMPANY_CATEGORIES lookup for the chosen category.
        st.info(f"📌 {COMPANY_CATEGORIES[selected_category]['description']}")
        st.markdown("### Typical Decision Makers We'll Find:")
        for role in COMPANY_CATEGORIES[selected_category]['typical_roles']:
            st.markdown(f"- {role}")
    with col2:
        st.markdown("### Company Size Filter")
        company_size = st.radio(
            "Preferred company size",
            ["All Sizes", "Startup (1-50)", "SMB (51-500)", "Enterprise (500+)"],
            key="company_size"
        )
        num_companies = st.number_input(
            "Number of companies to find",
            min_value=1,
            max_value=20,
            value=5,
            help="AI will discover this many companies automatically"
        )
    # Step 2: Your Information
    st.header("2️⃣ Your Contact Information")
    col3, col4 = st.columns(2)
    with col3:
        st.subheader("Required Information")
        sender_details = {
            "name": st.text_input("Your Name *", key="sender_name"),
            "email": st.text_input("Your Email *", key="sender_email"),
            "organization": st.text_input("Your Organization *", key="sender_org")
        }
    with col4:
        st.subheader("Optional Information")
        sender_details.update({
            "linkedin": st.text_input("LinkedIn Profile (optional)", key="sender_linkedin", placeholder="https://linkedin.com/in/yourname"),
            "phone": st.text_input("Phone Number (optional)", key="sender_phone", placeholder="+1 (555) 123-4567"),
            "website": st.text_input("Company Website (optional)", key="sender_website", placeholder="https://yourcompany.com"),
            "calendar_link": st.text_input("Calendar Link (optional)", key="sender_calendar", placeholder="https://calendly.com/yourname")
        })
    # Service description (required — validated below alongside the other fields)
    sender_details["service_offered"] = st.text_area(
        "Describe your offering *",
        height=100,
        key="service_description",
        help="Explain what you offer and how it helps businesses",
        placeholder="We help companies build custom AI solutions that automate workflows and improve efficiency..."
    )
    # Step 3: Service Type and Targeting
    st.header("3️⃣ Outreach Configuration")
    col5, col6 = st.columns(2)
    with col5:
        service_type = st.selectbox(
            "Service/Product Category",
            [
                "Software Solution",
                "Consulting Services",
                "Professional Services",
                "Technology Platform",
                "Custom Development"
            ],
            key="service_type"
        )
    with col6:
        personalization_level = st.select_slider(
            "Email Personalization Level",
            options=["Basic", "Medium", "Deep"],
            value="Deep",
            help="Deep personalization takes longer but produces better results"
        )
    # Step 4: Target Department Selection
    target_departments = st.multiselect(
        "Which departments should we target?",
        [
            "GTM (Sales & Marketing)",
            "Human Resources",
            "Engineering/Tech",
            "Operations",
            "Finance",
            "Product",
            "Executive Leadership"
        ],
        default=["GTM (Sales & Marketing)"],
        key="target_departments",
        help="AI will find decision makers in these departments"
    )
    # Validate required inputs. Each failed check stops this script run, so
    # the error shows immediately while the user is still filling the form.
    required_fields = ["name", "email", "organization", "service_offered"]
    missing_fields = [field for field in required_fields if not sender_details.get(field)]
    if missing_fields:
        st.error(f"Please fill in required fields: {', '.join(missing_fields)}")
        st.stop()
    if not target_departments:
        st.error("Please select at least one target department")
        st.stop()
    if not selected_category:
        st.error("Please select a company category")
        st.stop()
    if not service_type:
        st.error("Please select a service type")
        st.stop()
    # Create and return configuration
    outreach_config = OutreachConfig(
        company_category=selected_category,
        target_departments=target_departments,
        service_type=service_type,
        company_size_preference=company_size,
        personalization_level=personalization_level
    )
    return outreach_config, sender_details, num_companies
def main():
"""
Main entry point for running the automated B2B outreach workflow.
"""
try:
# Set page config must be the first Streamlit command
st.set_page_config(
page_title="Automated B2B Email Outreach",
layout="wide",
initial_sidebar_state="expanded"
)
# API Keys in Sidebar
st.sidebar.header("🔑 API Configuration")
# Update API keys from sidebar
st.session_state.EXA_API_KEY = st.sidebar.text_input(
"Exa API Key *",
value=st.session_state.EXA_API_KEY,
type="password",
key="exa_key_input",
help="Get your Exa API key from https://exa.ai"
)
st.session_state.OPENAI_API_KEY = st.sidebar.text_input(
"OpenAI API Key *",
value=st.session_state.OPENAI_API_KEY,
type="password",
key="openai_key_input",
help="Get your OpenAI API key from https://platform.openai.com"
)
# Update environment variables
os.environ["EXA_API_KEY"] = st.session_state.EXA_API_KEY
os.environ["OPENAI_API_KEY"] = st.session_state.OPENAI_API_KEY
# Validate API keys
if not st.session_state.EXA_API_KEY or not st.session_state.OPENAI_API_KEY:
st.sidebar.error("⚠️ Both API keys are required to run the application")
else:
st.sidebar.success("✅ API keys configured")
# Add guidance about API keys
st.sidebar.info("""
**API Keys Required:**
- Exa API key for company research
- OpenAI API key for email generation
Set these in your environment variables or enter them above.
""")
# Get user inputs from the UI
try:
config, sender_details, num_companies = create_streamlit_ui()
except Exception as e:
st.error(f"Configuration error: {str(e)}")
st.stop()
# Generate Emails Section
st.header("4️⃣ Generate Outreach Campaign")
st.info(f"""
**Ready to launch automated prospecting:**
- Target: {config.company_category} companies ({config.company_size_preference})
- Departments: {', '.join(config.target_departments)}
- Service: {config.service_type}
- Companies to find: {num_companies}
""")
if st.button("🚀 Start Automated Campaign", key="generate_button", type="primary"):
# Check if API keys are configured
if not st.session_state.EXA_API_KEY or not st.session_state.OPENAI_API_KEY:
st.error("❌ Please configure both API keys before starting the campaign")
st.stop()
try:
# Progress tracking
progress_bar = st.progress(0)
status_text = st.empty()
results_container = st.container()
with st.spinner("Initializing AI research agents..."):
# Setup the database
db = SqliteDb(
db_file="tmp/agno_workflows.db",
)
workflow = PersonalisedEmailGenerator(
session_id="streamlit-email-generator",
db=db
)
status_text.text("🔍 Discovering companies and generating emails...")
# Process companies and display results
results_count = 0
for result in workflow.run(
config=config,
sender_details=sender_details,
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | true |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_journalist_agent/journalist_agent.py | advanced_ai_agents/single_agent_apps/ai_journalist_agent/journalist_agent.py | # Import the required libraries
from textwrap import dedent
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.tools.serpapi import SerpApiTools
from agno.tools.newspaper4k import Newspaper4kTools
import streamlit as st
from agno.models.openai import OpenAIChat

# Set up the Streamlit app
st.title("AI Journalist Agent 🗞️")
# Fixed typo in the user-facing caption: "wriritng" -> "writing".
st.caption("Generate High-quality articles with AI Journalist by researching, writing and editing quality articles on autopilot using GPT-4o")
# Get OpenAI API key from user
openai_api_key = st.text_input("Enter OpenAI API Key to access GPT-4o", type="password")
# Get SerpAPI key from the user
serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password")

if openai_api_key and serp_api_key:
    # Research agent: turns a topic into search terms and relevant URLs.
    searcher = Agent(
        name="Searcher",
        role="Searches for top URLs based on a topic",
        model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
        description=dedent(
            """\
            You are a world-class journalist for the New York Times. Given a topic, generate a list of 3 search terms
            for writing an article on that topic. Then search the web for each term, analyse the results
            and return the 10 most relevant URLs.
            """
        ),
        instructions=[
            "Given a topic, first generate a list of 3 search terms related to that topic.",
            # Missing commas previously fused adjacent entries into single
            # concatenated strings; each instruction is now its own item.
            "For each search term, `search_google` and analyze the results.",
            "From the results of all searches, return the 10 most relevant URLs to the topic.",
            "Remember: you are writing for the New York Times, so the quality of the sources is important.",
        ],
        tools=[SerpApiTools(api_key=serp_api_key)],
        add_datetime_to_context=True,
    )
    # Writing agent: reads the discovered URLs and drafts the article.
    writer = Agent(
        name="Writer",
        role="Retrieves text from URLs and writes a high-quality article",
        model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
        description=dedent(
            """\
            You are a senior writer for the New York Times. Given a topic and a list of URLs,
            your goal is to write a high-quality NYT-worthy article on the topic.
            """
        ),
        instructions=[
            "Given a topic and a list of URLs, first read the article using `get_article_text`.",
            "Then write a high-quality NYT-worthy article on the topic.",
            "The article should be well-structured, informative, and engaging",
            "Ensure the length is at least as long as a NYT cover story -- at a minimum, 15 paragraphs.",
            "Ensure you provide a nuanced and balanced opinion, quoting facts where possible.",
            "Remember: you are writing for the New York Times, so the quality of the article is important.",
            "Focus on clarity, coherence, and overall quality.",
            "Never make up facts or plagiarize. Always provide proper attribution.",
        ],
        tools=[Newspaper4kTools()],
        add_datetime_to_context=True,
        markdown=True,
    )
    # Editing agent: orchestrates the team and produces the final article.
    editor = Agent(
        name="Editor",
        model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
        team=[searcher, writer],
        description="You are a senior NYT editor. Given a topic, your goal is to write a NYT worthy article.",
        instructions=[
            "Given a topic, ask the search journalist to search for the most relevant URLs for that topic.",
            "Then pass a description of the topic and URLs to the writer to get a draft of the article.",
            "Edit, proofread, and refine the article to ensure it meets the high standards of the New York Times.",
            "The article should be extremely articulate and well written.",
            "Focus on clarity, coherence, and overall quality.",
            "Ensure the article is engaging and informative.",
            "Remember: you are the final gatekeeper before the article is published.",
        ],
        add_datetime_to_context=True,
        markdown=True,
    )
    # Input field for the report query
    query = st.text_input("What do you want the AI journalist to write an Article on?")
    if query:
        with st.spinner("Processing..."):
            # Get the response from the assistant (non-streaming)
            response: RunOutput = editor.run(query, stream=False)
            st.write(response.content)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_recipe_meal_planning_agent/ai_recipe_meal_planning_agent.py | advanced_ai_agents/single_agent_apps/ai_recipe_meal_planning_agent/ai_recipe_meal_planning_agent.py | import asyncio
import os
import streamlit as st
import random
from textwrap import dedent
from typing import Dict, List, Optional
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.models.openai import OpenAIChat
from agno.tools import tool
import requests
from dotenv import load_dotenv
from agno.tools.duckduckgo import DuckDuckGoTools
load_dotenv()
SPOONACULAR_API_KEY = os.getenv("SPOONACULAR_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
@tool
def search_recipes(ingredients: str, diet_type: Optional[str] = None) -> Dict:
    """Search for detailed recipes with cooking instructions.

    Args:
        ingredients: Comma-separated ingredient names to match against.
        diet_type: Optional diet filter forwarded to the Spoonacular API.

    Returns:
        {"recipes": [...], "total_found": int} on success, or {"error": str}.
    """
    if not SPOONACULAR_API_KEY:
        return {"error": "Spoonacular API key not found"}
    url = "https://api.spoonacular.com/recipes/findByIngredients"
    params = {
        "apiKey": SPOONACULAR_API_KEY,
        "ingredients": ingredients,
        "number": 5,
        "ranking": 2,
        "ignorePantry": True
    }
    if diet_type:
        params["diet"] = diet_type
    try:
        response = requests.get(url, params=params, timeout=15)
        response.raise_for_status()
        recipes = response.json()
        detailed_recipes = []
        # Only fetch full details for the top 3 matches to limit API quota use.
        for recipe in recipes[:3]:
            detail_url = f"https://api.spoonacular.com/recipes/{recipe['id']}/information"
            detail_response = requests.get(detail_url, params={"apiKey": SPOONACULAR_API_KEY}, timeout=10)
            if detail_response.status_code == 200:
                detail_data = detail_response.json()
                detailed_recipes.append({
                    "id": recipe['id'],
                    "title": recipe['title'],
                    "ready_in_minutes": detail_data.get('readyInMinutes', 'N/A'),
                    "servings": detail_data.get('servings', 'N/A'),
                    "health_score": detail_data.get('healthScore', 0),
                    "used_ingredients": [i['name'] for i in recipe['usedIngredients']],
                    "missing_ingredients": [i['name'] for i in recipe['missedIngredients']],
                    "instructions": detail_data.get('instructions', 'Instructions not available')
                })
        return {
            "recipes": detailed_recipes,
            "total_found": len(recipes)
        }
    except (requests.RequestException, KeyError, ValueError):
        # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        # Catch only network/HTTP failures and malformed-response shapes.
        return {"error": "Recipe search failed"}
@tool
def analyze_nutrition(recipe_name: str) -> Dict:
    """Get nutrition analysis for a recipe by searching for it.

    Args:
        recipe_name: Free-text recipe name; the closest API match is analyzed.

    Returns:
        Dict of macro figures plus health insights, or {"error": str}.
    """
    if not SPOONACULAR_API_KEY:
        return {"error": "API key not found"}
    # First search for the recipe
    search_url = "https://api.spoonacular.com/recipes/complexSearch"
    search_params = {
        "apiKey": SPOONACULAR_API_KEY,
        "query": recipe_name,
        "number": 1,
        "addRecipeInformation": True,
        "addRecipeNutrition": True
    }
    try:
        search_response = requests.get(search_url, params=search_params, timeout=15)
        search_response.raise_for_status()
        search_data = search_response.json()
        if not search_data.get('results'):
            return {"error": f"No recipe found for '{recipe_name}'"}
        recipe = search_data['results'][0]
        if 'nutrition' not in recipe:
            return {"error": "No nutrition data available for this recipe"}
        nutrients = {n['name']: n['amount'] for n in recipe['nutrition']['nutrients']}
        calories = round(nutrients.get('Calories', 0))
        protein = round(nutrients.get('Protein', 0), 1)
        carbs = round(nutrients.get('Carbohydrates', 0), 1)
        fat = round(nutrients.get('Fat', 0), 1)
        fiber = round(nutrients.get('Fiber', 0), 1)
        sodium = round(nutrients.get('Sodium', 0), 1)
        # Health insights derived from simple macro thresholds
        health_insights = []
        if protein > 25:
            health_insights.append("✅ High protein - great for muscle building")
        if fiber > 5:
            health_insights.append("✅ High fiber - supports digestive health")
        if sodium < 600:
            health_insights.append("✅ Low sodium - heart-friendly")
        if calories < 400:
            health_insights.append("✅ Low calorie - good for weight management")
        return {
            "recipe_title": recipe.get('title', 'Recipe'),
            "servings": recipe.get('servings', 1),
            "ready_in_minutes": recipe.get('readyInMinutes', 'N/A'),
            "health_score": recipe.get('healthScore', 0),
            "calories": calories,
            "protein": protein,
            "carbs": carbs,
            "fat": fat,
            "fiber": fiber,
            "sodium": sodium,
            "health_insights": health_insights
        }
    except (requests.RequestException, KeyError, ValueError, TypeError):
        # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        # Catch only network/HTTP failures and malformed-response shapes.
        return {"error": "Nutrition analysis failed"}
@tool
def estimate_costs(ingredients: List[str], servings: int = 4) -> Dict:
    """Detailed cost estimation with budget tips.

    Looks each ingredient up in a small fixed price catalog (4-serving base
    prices), scales by the requested serving count, and returns a per-item
    breakdown plus money-saving tips.
    """
    # Base prices assume 4 servings; unknown items fall back to a flat default.
    prices = {
        "chicken breast": 6.99, "ground beef": 5.99, "salmon": 12.99,
        "rice": 2.99, "pasta": 1.99, "broccoli": 2.99, "tomatoes": 3.99,
        "cheese": 5.99, "onion": 1.49, "garlic": 2.99, "olive oil": 7.99
    }

    def unit_price(raw_name: str) -> float:
        # First catalog entry whose full key, or any word of it, appears in
        # the ingredient wins; otherwise use the default price.
        needle = raw_name.lower().strip()
        for entry, price in prices.items():
            if entry in needle or any(token in needle for token in entry.split()):
                return price
        return 3.99

    cost_breakdown = []
    total_cost = 0
    for item in ingredients:
        scaled = (unit_price(item) * servings) / 4
        total_cost += scaled
        cost_breakdown.append({
            "name": item.title(),
            "cost": round(scaled, 2)
        })

    # Tips become more aggressive as the basket total grows.
    budget_tips = []
    if total_cost > 30:
        budget_tips.append("💡 Consider buying in bulk for better prices")
    if total_cost > 40:
        budget_tips.append("💡 Look for seasonal alternatives to reduce costs")
    budget_tips.append("💡 Shop at local markets for fresher, cheaper produce")

    return {
        "total_cost": round(total_cost, 2),
        "cost_per_serving": round(total_cost / servings, 2),
        "servings": servings,
        "breakdown": cost_breakdown,
        "budget_tips": budget_tips
    }
@tool
def create_meal_plan(dietary_preference: str = "balanced", people: int = 2, days: int = 7, budget: str = "moderate") -> Dict:
    """Create comprehensive weekly meal plan with nutrition and shopping list.

    Meals are picked randomly (no seed) from a small hard-coded catalog, so
    repeated calls produce different plans. Costs scale by head count and a
    budget multiplier; nutrition figures are per-person catalog estimates.
    """
    # Static catalog; calorie/protein/cost values are hard-coded estimates
    # per serving for a single person.
    meals = {
        "breakfast": [
            {"name": "Overnight Oats with Berries", "calories": 320, "protein": 12, "cost": 2.50},
            {"name": "Veggie Scramble with Toast", "calories": 280, "protein": 18, "cost": 3.20},
            {"name": "Greek Yogurt Parfait", "calories": 250, "protein": 15, "cost": 2.80}
        ],
        "lunch": [
            {"name": "Quinoa Buddha Bowl", "calories": 420, "protein": 16, "cost": 4.50},
            {"name": "Chicken Caesar Wrap", "calories": 380, "protein": 25, "cost": 5.20},
            {"name": "Lentil Vegetable Soup", "calories": 340, "protein": 18, "cost": 3.80}
        ],
        "dinner": [
            {"name": "Grilled Salmon with Vegetables", "calories": 520, "protein": 35, "cost": 8.90},
            {"name": "Chicken Stir Fry with Brown Rice", "calories": 480, "protein": 32, "cost": 6.50},
            {"name": "Vegetable Curry with Quinoa", "calories": 450, "protein": 15, "cost": 5.20}
        ]
    }
    # Unknown budget strings silently fall back to the "moderate" multiplier.
    budget_multipliers = {"low": 0.7, "moderate": 1.0, "high": 1.3}
    multiplier = budget_multipliers.get(budget.lower(), 1.0)
    weekly_plan = {}
    shopping_list = set()
    total_weekly_cost = 0
    total_weekly_calories = 0
    total_weekly_protein = 0
    # NOTE(review): days > 7 is silently capped by the slice below — confirm
    # callers never request more than a week.
    day_names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    for day in day_names[:days]:
        daily_meals = {}
        daily_calories = 0
        daily_protein = 0
        daily_cost = 0
        for meal_type in ["breakfast", "lunch", "dinner"]:
            selected_meal = random.choice(meals[meal_type])
            daily_meals[meal_type] = {
                "name": selected_meal["name"],
                "calories": selected_meal["calories"],
                "protein": selected_meal["protein"]
            }
            # Cost scales with head count and budget; calories/protein stay per-person.
            meal_cost = selected_meal["cost"] * people * multiplier
            daily_calories += selected_meal["calories"]
            daily_protein += selected_meal["protein"]
            daily_cost += meal_cost
            # Add to shopping list via keyword matching on the meal name.
            if "chicken" in selected_meal["name"].lower():
                shopping_list.add("Chicken breast")
            if "salmon" in selected_meal["name"].lower():
                shopping_list.add("Salmon fillets")
            if "vegetable" in selected_meal["name"].lower():
                shopping_list.update(["Mixed vegetables", "Onions", "Garlic"])
            if "quinoa" in selected_meal["name"].lower():
                shopping_list.add("Quinoa")
            if "oats" in selected_meal["name"].lower():
                shopping_list.add("Rolled oats")
        weekly_plan[day] = daily_meals
        total_weekly_cost += daily_cost
        total_weekly_calories += daily_calories
        total_weekly_protein += daily_protein
    # Generate insights from the per-person daily averages.
    avg_daily_calories = round(total_weekly_calories / days)
    avg_daily_protein = round(total_weekly_protein / days, 1)
    insights = []
    if avg_daily_calories < 1800:
        insights.append("⚠️ Consider adding healthy snacks to meet calorie needs")
    elif avg_daily_calories > 2200:
        insights.append("💡 Calorie-dense meals - great for active lifestyles")
    if avg_daily_protein > 80:
        insights.append("✅ Excellent protein intake for muscle maintenance")
    elif avg_daily_protein < 60:
        insights.append("💡 Consider adding more protein sources")
    return {
        "meal_plan": weekly_plan,
        "total_weekly_cost": round(total_weekly_cost, 2),
        "cost_per_person_per_day": round(total_weekly_cost / (people * days), 2),
        "avg_daily_calories": avg_daily_calories,
        "avg_daily_protein": avg_daily_protein,
        "dietary_preference": dietary_preference,
        "serves": people,
        "days": days,
        "shopping_list": sorted(list(shopping_list)),
        "insights": insights
    }
async def create_agent():
    """Build the meal-planning agent wired to the local tools plus web search."""
    agent = Agent(
        name="MealPlanningExpert",
        model=OpenAIChat(id="gpt-5-mini"),
        # Local @tool functions plus DuckDuckGo for general web lookups.
        tools=[search_recipes, analyze_nutrition, estimate_costs, create_meal_plan, DuckDuckGoTools()],
        instructions=dedent("""\
            You are an expert meal planning assistant. Provide detailed, helpful responses:
            🔍 **Recipe Searches**: Include cooking time, health scores, ingredient lists, and instructions
            📊 **Nutrition Analysis**: Provide health insights, nutritional breakdowns, and dietary advice
            💰 **Cost Estimation**: Include budget tips and cost per serving breakdowns
            📅 **Meal Planning**: Create detailed weekly plans with nutritional balance and shopping lists
            **Always**:
            - Use clear headings and bullet points
            - Include practical cooking tips
            - Consider dietary restrictions and budgets
            - Provide actionable next steps
            - Be encouraging and supportive
        """),
        markdown=True,
        debug_mode=True
    )
    return agent
def main():
    """Streamlit entry point: chat UI backed by the meal-planning agent.

    Creates the agent once per session, replays chat history, and routes new
    user messages through the agent's async `arun`.
    """
    st.set_page_config(page_title="AI Meal Planning Agent", page_icon="🍽️", layout="wide")
    st.title("🍽️ AI Meal Planning Agent")
    st.markdown("*Your intelligent companion for recipes, nutrition, and meal planning*")
    if not OPENAI_API_KEY:
        st.error("Please add OPENAI_API_KEY to your .env file")
        st.stop()
    # Initialize agent once per Streamlit session.
    if "agent" not in st.session_state:
        with st.spinner("Initializing agent..."):
            try:
                # asyncio.run creates AND closes a fresh event loop. The previous
                # new_event_loop()/set_event_loop() pattern leaked one unclosed
                # loop per call, since the loops were never closed.
                st.session_state.agent = asyncio.run(create_agent())
            except Exception as e:
                st.error(f"Failed to initialize agent: {e}")
                st.stop()
    # Initialize messages with a one-time welcome prompt.
    if "messages" not in st.session_state:
        st.session_state.messages = [{
            "role": "assistant",
            "content": """👋 **Welcome! I'm your AI Meal Planning Expert.**
I can help you with:
- 🔍 **Recipe Discovery** - Find recipes based on your ingredients
- 📊 **Nutrition Analysis** - Get detailed nutritional insights
- 💰 **Cost Estimation** - Smart budget planning with money-saving tips
- 📅 **Meal Planning** - Complete weekly meal plans with shopping lists
**Try asking:**
- "Find healthy chicken recipes for dinner"
- "What's the nutrition info for chicken teriyaki?"
- "Create a vegetarian meal plan for 2 people for one week"
- "Estimate costs for pasta, tomatoes, cheese, and basil for 4 servings"
What would you like to explore? 🍽️"""
        }]
    # Replay chat history on every rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # Chat input: append, run the agent, and record the reply (or the error).
    if user_input := st.chat_input("Ask about recipes, nutrition, meal planning, or costs..."):
        st.session_state.messages.append({"role": "user", "content": user_input})
        with st.chat_message("user"):
            st.markdown(user_input)
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                try:
                    # Same fix as above: run the coroutine on a managed loop.
                    response: RunOutput = asyncio.run(
                        st.session_state.agent.arun(user_input)
                    )
                    st.markdown(response.content)
                    st.session_state.messages.append({
                        "role": "assistant",
                        "content": response.content
                    })
                except Exception as e:
                    error_msg = f"Error: {str(e)}"
                    st.error(error_msg)
                    st.session_state.messages.append({
                        "role": "assistant",
                        "content": error_msg
                    })

if __name__ == "__main__":
    main()
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/main.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/main.py | # main.py
from langchain_google_genai import ChatGoogleGenerativeAI
from windows_use.agent import Agent
from dotenv import load_dotenv
# Load API credentials (e.g. the Google key) from a local .env file.
load_dotenv()
# Gemini 2.0 Flash drives the agent's reasoning.
llm=ChatGoogleGenerativeAI(model='gemini-2.0-flash')
instructions=['We have Claude Desktop, Perplexity and ChatGPT App installed on the desktop so if you need any help, just ask your AI friends.']
# use_vision=True additionally captures annotated desktop screenshots for the model.
agent = Agent(instructions=instructions,llm=llm,use_vision=True)
# One-shot CLI: read a single task from stdin, run it, print the result.
query=input("Enter your query: ")
agent_result=agent.invoke(query=query)
print(agent_result.content)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/__init__.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/__init__.py | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false | |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/desktop/views.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/desktop/views.py | from windows_use.tree.views import TreeState
from dataclasses import dataclass
from typing import Literal,Optional
@dataclass
class App:
    """A top-level window on the desktop, as discovered by Desktop.get_apps()."""
    name: str
    depth: int
    status: Literal['Maximized', 'Minimized', 'Normal']
    size: 'Size'

    def to_string(self):
        """Render a single pipe-delimited summary line for prompts/logging."""
        fields = (
            f'Name: {self.name}',
            f'Depth: {self.depth}',
            f'Status: {self.status}',
            f'Size: {self.size.to_string()}',
        )
        return '|'.join(fields)
@dataclass
class Size:
    """Pixel dimensions of a window."""
    width: int
    height: int

    def to_string(self):
        """Format as '(width,height)'."""
        return '({},{})'.format(self.width, self.height)
@dataclass
class DesktopState:
    """Snapshot of the desktop: open apps, optional screenshot, and UI tree."""
    apps: list[App]
    active_app: Optional[App]
    screenshot: bytes | None
    tree_state: TreeState

    def active_app_to_string(self):
        """Describe the foreground app, or a placeholder when none exists."""
        if self.active_app is None:
            return 'No active app'
        return self.active_app.to_string()

    def apps_to_string(self):
        """One summary line per background app; placeholder when none are open."""
        if not self.apps:
            return 'No apps opened'
        summaries = [app.to_string() for app in self.apps]
        return '\n'.join(summaries)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/desktop/config.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/desktop/config.py | from typing import Set
# Windows the agent must never interact with.
# Set literals replace the redundant set([...]) wrappers (flake8-comprehensions C405).
AVOIDED_APPS: Set[str] = {
    'Recording toolbar',
}

# Windows excluded from app enumeration entirely: desktop shell surfaces
# plus everything in AVOIDED_APPS.
EXCLUDED_APPS: Set[str] = {
    'Program Manager', 'Taskbar',
}.union(AVOIDED_APPS)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/desktop/__init__.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/desktop/__init__.py | from uiautomation import GetScreenSize, Control, GetRootControl, ControlType, GetFocusedControl
from windows_use.desktop.views import DesktopState,App,Size
from windows_use.desktop.config import EXCLUDED_APPS
from PIL.Image import Image as PILImage
from windows_use.tree import Tree
from fuzzywuzzy import process
from time import sleep
from io import BytesIO
from PIL import Image
import subprocess
import pyautogui
import base64
import csv
import io
class Desktop:
    """Windows desktop facade built on uiautomation.

    Enumerates top-level windows, launches apps via the Start menu, runs
    PowerShell commands, and captures (optionally annotated) screenshots.
    """
    def __init__(self):
        # Cache of the most recent get_state() snapshot; None until first call.
        self.desktop_state=None
    def get_state(self,use_vision:bool=False)->DesktopState:
        """Capture a full desktop snapshot (apps, UI tree, optional screenshot).

        The first enumerated app is treated as the foreground/active one;
        the rest are background apps. With use_vision=True the screenshot is
        annotated with the interactive UI nodes before encoding.
        """
        tree=Tree(self)
        apps=self.get_apps()
        tree_state=tree.get_state()
        # First entry (topmost in z-order of enumeration) becomes the active app.
        active_app,apps=(apps[0],apps[1:]) if len(apps)>0 else (None,[])
        if use_vision:
            annotated_screenshot=tree.annotate(tree_state.interactive_nodes)
            screenshot=self.screenshot_in_bytes(annotated_screenshot)
        else:
            screenshot=None
        self.desktop_state=DesktopState(apps=apps,active_app=active_app,screenshot=screenshot,tree_state=tree_state)
        return self.desktop_state
    def get_taskbar(self)->Control:
        """Return the taskbar control (first child of the root desktop control)."""
        root=GetRootControl()
        taskbar=root.GetFirstChildControl()
        return taskbar
    def get_app_status(self,control:Control)->str:
        """Classify a window as 'Minimized', 'Maximized', or 'Normal'.

        A window with an empty bounding rectangle is minimized; one covering
        the whole screen above the taskbar is maximized.
        """
        taskbar=self.get_taskbar()
        taskbar_height=taskbar.BoundingRectangle.height()
        window = control.BoundingRectangle
        screen_width, screen_height = GetScreenSize()
        window_width,window_height=window.width(),window.height()
        if window.isempty():
            return "Minimized"
        if window_width >= screen_width and window_height >= screen_height - taskbar_height:
            return "Maximized"
        return "Normal"
    def get_element_under_cursor(self)->Control:
        """Return the currently focused UI element."""
        return GetFocusedControl()
    def get_apps_from_start_menu(self)->dict[str,str]:
        """Map lower-cased Start-menu app names to their AppIDs via PowerShell."""
        command='Get-StartApps | ConvertTo-Csv -NoTypeInformation'
        apps_info,_=self.execute_command(command)
        reader=csv.DictReader(io.StringIO(apps_info))
        return {row.get('Name').lower():row.get('AppID') for row in reader}
    def execute_command(self,command:str)->tuple[str,int]:
        """Run a PowerShell command; return (stdout text, return code).

        NOTE(review): command.split() breaks arguments containing spaces
        inside quotes — confirm all callers pass shell-safe commands.
        """
        try:
            # latin1 decoding avoids UnicodeDecodeError on arbitrary console bytes.
            result = subprocess.run(['powershell', '-Command']+command.split(),
                                    capture_output=True, check=True)
            return (result.stdout.decode('latin1'),result.returncode)
        except subprocess.CalledProcessError as e:
            # Non-zero exit: surface whatever stdout the command produced.
            return (e.stdout.decode('latin1'),e.returncode)
    def launch_app(self,name:str):
        """Fuzzy-match *name* against Start-menu apps and launch the best match.

        Returns the (output, status) tuple from execute_command, or an error
        message with status 1 when no match exists.
        """
        apps_map=self.get_apps_from_start_menu()
        matched_app=process.extractOne(name,apps_map.keys())
        if matched_app is None:
            return (f'Application {name.title()} not found in start menu.',1)
        app_name,_=matched_app
        appid=apps_map.get(app_name)
        if appid is None:
            return (f'Application {name.title()} not found in start menu.',1)
        # .exe targets are plain executables; everything else is launched as
        # a packaged app through the shell AppsFolder.
        if name.endswith('.exe'):
            response,status=self.execute_command(f'Start-Process "{appid}"')
        else:
            response,status=self.execute_command(f'Start-Process "shell:AppsFolder\\{appid}"')
        return response,status
    def get_app_size(self,control:Control):
        """Return the window's Size; (0,0) when it has no on-screen rectangle."""
        window=control.BoundingRectangle
        if window.isempty():
            return Size(width=0,height=0)
        return Size(width=window.width(),height=window.height())
    def is_app_visible(self,app)->bool:
        """True for non-overlay, non-minimized windows with a non-trivial area."""
        # NOTE(review): despite its name, is_minimized is True when the app
        # is NOT minimized.
        is_minimized=self.get_app_status(app)!='Minimized'
        size=self.get_app_size(app)
        area=size.width*size.height
        is_overlay=self.is_overlay_app(app)
        return not is_overlay and is_minimized and area>10
    def is_overlay_app(self,element:Control) -> bool:
        """Heuristic: childless windows or ones named '*Overlay*' are overlays."""
        no_children = len(element.GetChildren()) == 0
        is_name = "Overlay" in element.Name.strip()
        return no_children or is_name
    def get_apps(self) -> list[App]:
        """Enumerate top-level Window/Pane controls as App records.

        Returns an empty list on any enumeration failure. The short sleep
        lets the desktop settle after recent UI actions.
        """
        try:
            sleep(0.75)
            desktop = GetRootControl() # Get the desktop control
            elements = desktop.GetChildren()
            apps = []
            for depth, element in enumerate(elements):
                # Skip shell surfaces, avoided apps, and overlay windows.
                if element.Name in EXCLUDED_APPS or self.is_overlay_app(element):
                    continue
                if element.ControlType in [ControlType.WindowControl, ControlType.PaneControl]:
                    status = self.get_app_status(element)
                    size=self.get_app_size(element)
                    apps.append(App(name=element.Name, depth=depth, status=status,size=size))
        except Exception as ex:
            print(f"Error: {ex}")
            apps = []
        return apps
    def screenshot_in_bytes(self,screenshot:PILImage)->bytes:
        """Encode a PIL image as a base64 PNG data URI.

        NOTE(review): despite the -> bytes annotation this returns a str
        (the data URI) — confirm callers expect a string.
        """
        buffer=BytesIO()
        screenshot.save(buffer,format='PNG')
        img_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')
        data_uri = f"data:image/png;base64,{img_base64}"
        return data_uri
    def get_screenshot(self,scale:float=0.7)->Image:
        """Capture the screen, downscaled in place by *scale* to cut payload size."""
        screenshot=pyautogui.screenshot()
        size=(screenshot.width*scale, screenshot.height*scale)
        screenshot.thumbnail(size=size, resample=Image.Resampling.LANCZOS)
        return screenshot
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/views.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/views.py | from langchain_core.messages.base import BaseMessage
from pydantic import BaseModel,Field
from typing import Optional
from uuid import uuid4
class AgentState(BaseModel):
    """Mutable run state threaded through the agent loop."""
    id: str = Field(default_factory=lambda: str(uuid4()))
    consecutive_failures: int = 0
    result: str = ''
    # Fixed annotations: these fields default to None, so they must be
    # Optional (previously declared as plain 'AgentData' / str with None
    # defaults, violating the declared types).
    agent_data: Optional['AgentData'] = None
    messages: list[BaseMessage] = Field(default_factory=list)
    previous_observation: Optional[str] = None

    def is_done(self):
        """True once the model has chosen the terminal 'Done Tool' action.

        Also guards against agent_data.action being None, which previously
        raised AttributeError instead of returning False.
        """
        return (
            self.agent_data is not None
            and self.agent_data.action is not None
            and self.agent_data.action.name == 'Done Tool'
        )

    def initialize_state(self, messages: list[BaseMessage]):
        """Reset counters and result, and seed the conversation history."""
        self.consecutive_failures = 0
        self.result = ""
        self.messages = messages

    def update_state(self, agent_data: Optional['AgentData'] = None, observation: Optional[str] = None, result: Optional[str] = None, messages: Optional[list[BaseMessage]] = None):
        """Record the latest step's parsed output, observation, and messages."""
        self.result = result
        self.previous_observation = observation
        self.agent_data = agent_data
        self.messages.extend(messages or [])
class AgentStep(BaseModel):
    """Tracks progress through the bounded step loop."""
    step_number: int = 0
    max_steps: int
    def is_last_step(self):
        """True once the next step would exceed the allowed budget."""
        return self.step_number + 1 >= self.max_steps
    def increment_step(self):
        """Advance the step counter by one."""
        self.step_number += 1
class AgentResult(BaseModel):
    """Terminal outcome of an agent run: completion flag plus answer or error."""
    is_done: Optional[bool] = False
    content: Optional[str] = None
    error: Optional[str] = None
# A single tool invocation parsed from the LLM reply: tool name plus kwargs.
class Action(BaseModel):
    name:str
    params: dict
class AgentData(BaseModel):
    """Structured fields extracted from one LLM response (all optional)."""
    evaluate: str | None = None
    memory: str | None = None
    thought: str | None = None
    action: Action | None = None
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/service.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/service.py | from windows_use.agent.tools.service import click_tool, type_tool, launch_tool, shell_tool, clipboard_tool, done_tool, shortcut_tool, scroll_tool, drag_tool, move_tool, key_tool, wait_tool, scrape_tool
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from windows_use.agent.views import AgentState, AgentStep, AgentResult
from windows_use.agent.utils import extract_agent_data, image_message
from langchain_core.language_models.chat_models import BaseChatModel
from windows_use.agent.registry.views import ToolResult
from windows_use.agent.registry.service import Registry
from windows_use.agent.prompt.service import Prompt
from langchain_core.tools import BaseTool
from windows_use.desktop import Desktop
from termcolor import colored
import logging
# Module logger that prints bare messages (no level/name prefix) to stderr.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class Agent:
'''
Windows Use
An agent that can interact with GUI elements on Windows
Args:
instructions (list[str], optional): Instructions for the agent. Defaults to [].
additional_tools (list[BaseTool], optional): Additional tools for the agent. Defaults to [].
llm (BaseChatModel): Language model for the agent. Defaults to None.
max_steps (int, optional): Maximum number of steps for the agent. Defaults to 100.
use_vision (bool, optional): Whether to use vision for the agent. Defaults to False.
Returns:
Agent
'''
def __init__(self, instructions: list[str] = None, additional_tools: list[BaseTool] = None, llm: BaseChatModel = None, max_steps: int = 100, use_vision: bool = False):
    """Build the agent: tool registry, desktop handle, and fresh run state.

    Bug fix: the old signature used mutable default arguments (``[]``) for
    ``instructions`` and ``additional_tools``; such lists are shared across
    every Agent instance. ``None`` sentinels preserve the call contract.

    Args:
        instructions: extra system-prompt instructions (defaults to none).
        additional_tools: extra langchain tools to register.
        llm: chat model used by reason().
        max_steps: hard cap on reason/act iterations.
        use_vision: attach screenshots to observations when True.
    """
    self.name = 'Windows Use'
    self.description = 'An agent that can interact with GUI elements on Windows'
    self.registry = Registry([
        click_tool, type_tool, launch_tool, shell_tool, clipboard_tool,
        done_tool, shortcut_tool, scroll_tool, drag_tool, move_tool,
        key_tool, wait_tool, scrape_tool
    ] + (additional_tools or []))
    self.instructions = instructions or []
    self.desktop = Desktop()
    self.agent_state = AgentState()
    self.agent_step = AgentStep(max_steps=max_steps)
    self.use_vision = use_vision
    self.llm = llm
def reason(self):
    """Invoke the LLM on the running transcript and record its parsed plan."""
    llm_reply = self.llm.invoke(self.agent_state.messages)
    parsed = extract_agent_data(message=llm_reply)
    self.agent_state.update_state(agent_data=parsed, messages=[llm_reply])
    logger.info(colored(f"💭: Thought: {parsed.thought}", color='light_magenta', attrs=['bold']))
def action(self):
    """Execute the tool chosen by the last reasoning step and append the new observation."""
    self.agent_state.messages.pop() # Remove the last message to avoid duplication
    last_message = self.agent_state.messages[-1]
    if isinstance(last_message, HumanMessage):
        # Collapse the previous full observation into its compact form to save context.
        self.agent_state.messages[-1]=HumanMessage(content=Prompt.previous_observation_prompt(self.agent_state.previous_observation))
    ai_message = AIMessage(content=Prompt.action_prompt(agent_data=self.agent_state.agent_data))
    name = self.agent_state.agent_data.action.name
    params = self.agent_state.agent_data.action.params
    logger.info(colored(f"🔧: Action: {name}({', '.join(f'{k}={v}' for k, v in params.items())})",color='blue',attrs=['bold']))
    tool_result = self.registry.execute(tool_name=name, desktop=self.desktop, **params)
    observation=tool_result.content if tool_result.is_success else tool_result.error
    logger.info(colored(f"🔭: Observation: {observation}",color='green',attrs=['bold']))
    # Re-read the desktop after the action so the next prompt reflects the new UI state.
    desktop_state = self.desktop.get_state(use_vision=self.use_vision)
    prompt=Prompt.observation_prompt(agent_step=self.agent_step, tool_result=tool_result, desktop_state=desktop_state)
    human_message=image_message(prompt=prompt,image=desktop_state.screenshot) if self.use_vision and desktop_state.screenshot else HumanMessage(content=prompt)
    self.agent_state.update_state(agent_data=None,observation=observation,messages=[ai_message, human_message])
def answer(self):
    """Run the final (Done) tool and record its answer in the agent state."""
    self.agent_state.messages.pop() # Remove the last message to avoid duplication
    last_message = self.agent_state.messages[-1]
    if isinstance(last_message, HumanMessage):
        # Collapse the previous full observation into its compact form to save context.
        self.agent_state.messages[-1]=HumanMessage(content=Prompt.previous_observation_prompt(self.agent_state.previous_observation))
    name = self.agent_state.agent_data.action.name
    params = self.agent_state.agent_data.action.params
    # The Done Tool needs no desktop handle, hence desktop=None.
    tool_result = self.registry.execute(tool_name=name, desktop=None, **params)
    ai_message = AIMessage(content=Prompt.answer_prompt(agent_data=self.agent_state.agent_data, tool_result=tool_result))
    logger.info(colored(f"📜: Final Answer: {tool_result.content}",color='cyan',attrs=['bold']))
    self.agent_state.update_state(agent_data=None,observation=None,result=tool_result.content,messages=[ai_message])
def invoke(self,query: str):
    """Run the reason→act loop for *query* until done, failure, or step budget.

    Returns:
        AgentResult describing the outcome (answer on success, error otherwise).
    """
    max_steps = self.agent_step.max_steps
    tools_prompt = self.registry.get_tools_prompt()
    desktop_state = self.desktop.get_state(use_vision=self.use_vision)
    # Seed the transcript: system prompt, the task, then an initial "No Action" observation.
    prompt=Prompt.observation_prompt(agent_step=self.agent_step, tool_result=ToolResult(is_success=True, content="No Action"), desktop_state=desktop_state)
    system_message=SystemMessage(content=Prompt.system_prompt(instructions=self.instructions,tools_prompt=tools_prompt,max_steps=max_steps))
    human_message=image_message(prompt=prompt,image=desktop_state.screenshot) if self.use_vision and desktop_state.screenshot else HumanMessage(content=prompt)
    messages=[system_message,HumanMessage(content=f'Task: {query}'),human_message]
    self.agent_state.initialize_state(messages=messages)
    while True:
        if self.agent_step.is_last_step():
            logger.info("Reached maximum number of steps, stopping execution.")
            return AgentResult(is_done=False, content=None, error="Maximum steps reached.")
        self.reason()
        if self.agent_state.is_done():
            self.answer()
            return AgentResult(is_done=True, content=self.agent_state.result, error=None)
        self.action()
        # NOTE(review): consecutive_failures is never incremented anywhere in this
        # class, so this guard looks unreachable — confirm the intended behaviour.
        if self.agent_state.consecutive_failures >= 3:
            logger.warning("Consecutive failures exceeded limit, stopping execution.")
            return AgentResult(is_done=False, content=None, error="Consecutive failures exceeded limit.")
self.agent_step.increment_step() | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/utils.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/utils.py | from langchain_core.messages import BaseMessage,HumanMessage
from windows_use.agent.views import AgentData
import ast
import re
def read_file(file_path: str) -> str:
    """Return the full text content of *file_path*.

    Opens the file with an explicit UTF-8 encoding so the result does not
    depend on the platform's locale encoding (Windows defaults to cp1252
    otherwise, which breaks on non-ASCII prompt files).
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        return file.read()
def extract_agent_data(message: BaseMessage) -> AgentData:
    """Parse the XML-tagged sections of an LLM reply into an AgentData model."""
    text = message.content

    def _section(tag: str):
        # Stripped inner text of <tag>...</tag>, or None when the tag is absent.
        match = re.search(rf"<{tag}>(.*?)<\/{tag}>", text, re.DOTALL)
        return match.group(1).strip() if match else None

    result = {}
    # Optional free-text sections: only recorded when present in the reply.
    for key, tag in (('memory', 'Memory'), ('evaluate', 'Evaluate'), ('thought', 'Thought')):
        value = _section(tag)
        if value is not None:
            result[key] = value

    action = {}
    action_name = _section('Action-Name')
    if action_name is not None:
        action['name'] = action_name
    raw_params = _section('Action-Input')
    if raw_params is not None:
        try:
            # Safely evaluate the literal dict the model emitted.
            action['params'] = ast.literal_eval(raw_params)
        except (ValueError, SyntaxError):
            # Fall back to the raw string when it is not a valid literal.
            action['params'] = raw_params
    result['action'] = action
    return AgentData.model_validate(result)
def image_message(prompt, image) -> HumanMessage:
    """Build a multimodal HumanMessage pairing prompt text with an image payload."""
    text_part = {
        "type": "text",
        "text": prompt,
    }
    image_part = {
        "type": "image_url",
        "image_url": image,
    }
    return HumanMessage(content=[text_part, image_part])
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/__init__.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/__init__.py | from windows_use.agent.service import Agent
__all__=[
'Agent'
] | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/tools/views.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/tools/views.py | from pydantic import BaseModel,Field
from typing import Literal
# Base schema for all tool-argument models; tolerates extra fields so the LLM
# can pass through additional keys (e.g. the injected desktop handle).
class SharedBaseModel(BaseModel):
    class Config:
        # NOTE(review): inner Config is the pydantic v1 style; v2 prefers
        # model_config = ConfigDict(extra='allow') — confirm pydantic version.
        extra='allow'
# Final-answer payload for the Done Tool.
class Done(SharedBaseModel):
    answer:str = Field(...,description="the detailed final answer to the user query in proper markdown format",examples=["The task is completed successfully."])
# Copy/paste arguments for the Clipboard Tool.
class Clipboard(SharedBaseModel):
    mode:Literal['copy','paste'] = Field(...,description="the mode of the clipboard",examples=['Copy'])
    text:str = Field(...,description="the text to copy to clipboard",examples=["hello world"])
# Mouse-click arguments for the Click Tool (clicks=0 means hover).
class Click(SharedBaseModel):
    loc:tuple[int,int]=Field(...,description="The coordinates of the element to click on.",examples=[(0,0)])
    button:Literal['left','right','middle']=Field(description='The button to click on the element.',default='left',examples=['left'])
    clicks:Literal[0,1,2]=Field(description="The number of times to click on the element. (0 for hover, 1 for single click, 2 for double click)",default=2,examples=[0])
# PowerShell command for the Shell Tool.
class Shell(SharedBaseModel):
    command:str=Field(...,description="The PowerShell command to execute.",examples=['Get-Process'])
# Keyboard-entry arguments for the Type Tool; note `clear` is a string flag, not a bool.
class Type(SharedBaseModel):
    loc:tuple[int,int]=Field(...,description="The coordinates of the element to type on.",examples=[(0,0)])
    text:str=Field(...,description="The text to type on the element.",examples=['hello world'])
    clear:Literal['true','false']=Field(description="To clear the text field before typing.",default='false',examples=['true'])
    caret_position:Literal['start','idle','end']=Field(description="The position of the caret.",default='idle',examples=['start','idle','end'])
# Application name for the Launch Tool (must match a Start-menu entry).
class Launch(SharedBaseModel):
    name:str=Field(...,description="The name of the application to launch.",examples=['Google Chrome'])
# Scroll-direction arguments for the Scroll Tool.
class Scroll(SharedBaseModel):
    loc:tuple[int,int]|None=Field(description="The coordinates of the element to scroll on. If None, the screen will be scrolled.",default=None,examples=[(0,0)])
    type:Literal['horizontal','vertical']=Field(description="The type of scroll.",default='vertical',examples=['vertical'])
    # Bug fix: the default was the list ['down'], which is not a valid value for
    # this Literal field — the default must be a single direction string.
    direction:Literal['up','down','left','right']=Field(description="The direction of the scroll.",default='down',examples=['down'])
    wheel_times:int=Field(description="The number of times to scroll.",default=1,examples=[1,2,5])
# Source/target coordinates for the Drag Tool.
class Drag(SharedBaseModel):
    from_loc:tuple[int,int]=Field(...,description="The from coordinates of the drag.",examples=[(0,0)])
    to_loc:tuple[int,int]=Field(...,description="The to coordinates of the drag.",examples=[(100,100)])
# Destination for the Move Tool (cursor move without clicking).
class Move(SharedBaseModel):
    to_loc:tuple[int,int]=Field(...,description="The coordinates to move to.",examples=[(100,100)])
# Key combination for the Shortcut Tool.
class Shortcut(SharedBaseModel):
    shortcut:list[str]=Field(...,description="The shortcut to execute by pressing the keys.",examples=[['ctrl','a'],['alt','f4']])
# Single key press for the Key Tool.
class Key(SharedBaseModel):
    key:str=Field(...,description="The key to press.",examples=['enter'])
# Pause duration for the Wait Tool.
class Wait(SharedBaseModel):
    duration:int=Field(...,description="The duration to wait in seconds.",examples=[5])
class Scrape(SharedBaseModel):
url:str=Field(...,description="The url of the webpage to scrape.",examples=['https://google.com']) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/tools/service.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/tools/service.py | from windows_use.agent.tools.views import Click, Type, Launch, Scroll, Drag, Move, Shortcut, Key, Wait, Scrape,Done, Clipboard, Shell
from windows_use.desktop import Desktop
from humancursor import SystemCursor
from markdownify import markdownify
from langchain.tools import tool
from typing import Literal
import uiautomation as ua
import pyperclip as pc
import pyautogui as pg
import requests
cursor=SystemCursor()
@tool('Done Tool',args_schema=Done)
def done_tool(answer:str,desktop:Desktop=None):
    '''To indicate that the task is completed'''
    # Identity tool: the agent routes its final markdown answer through here.
    return answer
@tool('Launch Tool',args_schema=Launch)
def launch_tool(name: str,desktop:Desktop=None) -> str:
    'Launch an application present in start menu (e.g., "notepad", "calculator", "chrome")'
    # launch_app returns (output, exit_status); 0 indicates success.
    _, exit_status = desktop.launch_app(name)
    if exit_status == 0:
        return f'Launched {name.title()}.'
    return f'Failed to launch {name.title()}.'
@tool('Shell Tool',args_schema=Shell)
def shell_tool(command: str,desktop:Desktop=None) -> str:
    'Execute PowerShell commands and return the output with status code'
    # Delegates to the Desktop wrapper; reports both exit status and output.
    response,status=desktop.execute_command(command)
    return f'Status Code: {status}\nResponse: {response}'
@tool('Clipboard Tool',args_schema=Clipboard)
def clipboard_tool(mode: Literal['copy', 'paste'], text: str = None,desktop:Desktop=None)->str:
    'Copy text to clipboard or retrieve current clipboard content. Use "copy" mode with text parameter to copy, "paste" mode to retrieve.'
    if mode == 'copy':
        # Guard clause: refuse an empty/missing payload before touching the clipboard.
        if not text:
            raise ValueError("No text provided to copy")
        pc.copy(text)  # push text onto the system clipboard
        return f'Copied "{text}" to clipboard'
    if mode == 'paste':
        clipboard_content = pc.paste()  # read current system clipboard
        return f'Clipboard Content: "{clipboard_content}"'
    raise ValueError('Invalid mode. Use "copy" or "paste".')
@tool('Click Tool',args_schema=Click)
def click_tool(loc:tuple[int,int],button:Literal['left','right','middle']='left',clicks:int=1,desktop:Desktop=None)->str:
    'Click on UI elements at specific coordinates. Supports left/right/middle mouse buttons and single/double/triple clicks. Use coordinates from State-Tool output.'
    x,y=loc
    # Human-like cursor move first, so the element under the cursor can be inspected.
    cursor.move_to(loc)
    control=desktop.get_element_under_cursor()
    pg.click(button=button,clicks=clicks)
    # NOTE(review): clicks=0 (hover per the Click schema) still reaches pg.click and
    # renders as "None ... Clicked" below — confirm whether hover is handled upstream.
    num_clicks={1:'Single',2:'Double',3:'Triple'}
    return f'{num_clicks.get(clicks)} {button} Clicked on {control.Name} Element with ControlType {control.ControlTypeName} at ({x},{y}).'
@tool('Type Tool',args_schema=Type)
def type_tool(loc:tuple[int,int],text:str,clear:str='false',caret_position:Literal['start','idle','end']='idle',desktop:Desktop=None):
    'Type text into input fields, text areas, or focused elements. Set clear=True to replace existing text, False to append. Click on target element coordinates first.'
    # NOTE(review): `clear` is compared against the string 'true' (per the Type
    # schema), although the description above says clear=True — keep callers consistent.
    x,y=loc
    cursor.click_on(loc)  # click first to focus the target element
    control=desktop.get_element_under_cursor()
    # Optionally reposition the caret before typing.
    if caret_position == 'start':
        pg.press('home')
    elif caret_position == 'end':
        pg.press('end')
    else:
        pass
    if clear=='true':
        # Select-all + backspace wipes the existing field content.
        pg.hotkey('ctrl','a')
        pg.press('backspace')
    pg.typewrite(text,interval=0.1)
    return f'Typed {text} on {control.Name} Element with ControlType {control.ControlTypeName} at ({x},{y}).'
@tool('Scroll Tool',args_schema=Scroll)
def scroll_tool(loc:tuple[int,int]=None,type:Literal['horizontal','vertical']='vertical',direction:Literal['up','down','left','right']='down',wheel_times:int=1,desktop:Desktop=None)->str:
    'Scroll at specific coordinates or current mouse position. Use wheel_times to control scroll amount (1 wheel = ~3-5 lines). Essential for navigating lists, web pages, and long content.'
    # Optionally position the cursor first; otherwise scroll wherever it is.
    if loc:
        cursor.move_to(loc)
    # match/case requires Python 3.10+.
    match type:
        case 'vertical':
            match direction:
                case 'up':
                    ua.WheelUp(wheel_times)
                case 'down':
                    ua.WheelDown(wheel_times)
                case _:
                    return 'Invalid direction. Use "up" or "down".'
        case 'horizontal':
            # Horizontal scrolling is emulated as Shift + vertical wheel.
            match direction:
                case 'left':
                    pg.keyDown('Shift')
                    pg.sleep(0.05)
                    ua.WheelUp(wheel_times)
                    pg.sleep(0.05)
                    pg.keyUp('Shift')
                case 'right':
                    pg.keyDown('Shift')
                    pg.sleep(0.05)
                    ua.WheelDown(wheel_times)
                    pg.sleep(0.05)
                    pg.keyUp('Shift')
                case _:
                    return 'Invalid direction. Use "left" or "right".'
        case _:
            return 'Invalid type. Use "horizontal" or "vertical".'
    return f'Scrolled {type} {direction} by {wheel_times} wheel times.'
@tool('Drag Tool',args_schema=Drag)
def drag_tool(from_loc:tuple[int,int],to_loc:tuple[int,int],desktop:Desktop=None)->str:
    'Drag and drop operation from source coordinates to destination coordinates. Useful for moving files, resizing windows, or drag-and-drop interactions.'
    # NOTE(review): the element is sampled at the *current* cursor position,
    # before the cursor moves to from_loc — confirm this reports the right element.
    control=desktop.get_element_under_cursor()
    x1,y1=from_loc
    x2,y2=to_loc
    cursor.drag_and_drop(from_loc,to_loc)
    return f'Dragged the {control.Name} element with ControlType {control.ControlTypeName} from ({x1},{y1}) to ({x2},{y2}).'
@tool('Move Tool',args_schema=Move)
def move_tool(to_loc:tuple[int,int],desktop:Desktop=None)->str:
    'Move mouse cursor to specific coordinates without clicking. Useful for hovering over elements or positioning cursor before other actions.'
    dest_x, dest_y = to_loc
    # Human-like cursor animation rather than an instant jump.
    cursor.move_to(to_loc)
    return f'Moved the mouse pointer to ({dest_x},{dest_y}).'
@tool('Shortcut Tool',args_schema=Shortcut)
def shortcut_tool(shortcut:list[str],desktop:Desktop=None):
    'Execute keyboard shortcuts using key combinations. Pass keys as list (e.g., ["ctrl", "c"] for copy, ["alt", "tab"] for app switching, ["win", "r"] for Run dialog).'
    pg.hotkey(*shortcut)
    # Bug fix: the join is hoisted out of the f-string — re-using the same quote
    # character inside an f-string expression is a SyntaxError before Python 3.12.
    combo = '+'.join(shortcut)
    return f'Pressed {combo}.'
@tool('Key Tool',args_schema=Key)
def key_tool(key:str='',desktop:Desktop=None)->str:
    'Press individual keyboard keys. Supports special keys like "enter", "escape", "tab", "space", "backspace", "delete", arrow keys ("up", "down", "left", "right"), function keys ("f1"-"f12").'
    # Single key press via pyautogui; key names follow pyautogui's conventions.
    pg.press(key)
    return f'Pressed the key {key}.'
@tool('Wait Tool',args_schema=Wait)
def wait_tool(duration:int,desktop:Desktop=None)->str:
    'Pause execution for specified duration in seconds. Useful for waiting for applications to load, animations to complete, or adding delays between actions.'
    # Blocking sleep; duration is in seconds.
    pg.sleep(duration)
    return f'Waited for {duration} seconds.'
@tool('Scrape Tool',args_schema=Scrape)
def scrape_tool(url:str,desktop:Desktop=None)->str:
    'Fetch and convert webpage content to markdown format. Provide full URL including protocol (http/https). Returns structured text content suitable for analysis.'
    # NOTE(review): no raise_for_status() — 4xx/5xx response bodies are scraped as-is.
    response=requests.get(url,timeout=10)
    html=response.text
    content=markdownify(html=html)
return f'Scraped the contents of the entire webpage:\n{content}' | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/tools/__init__.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/tools/__init__.py | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false | |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/prompt/service.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/prompt/service.py | from windows_use.agent.registry.views import ToolResult
from windows_use.agent.views import AgentStep, AgentData
from windows_use.desktop.views import DesktopState
from langchain.prompts import PromptTemplate
from importlib.resources import files
from datetime import datetime
from getpass import getuser
from textwrap import dedent
from pathlib import Path
import pyautogui as pg
import platform
class Prompt:
@staticmethod
def system_prompt(tools_prompt:str,max_steps:int,instructions: list[str]=[]) -> str:
width, height = pg.size()
template =PromptTemplate.from_file(files('windows_use.agent.prompt').joinpath('system.md'))
return template.format(**{
'current_datetime': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'instructions': '\n'.join(instructions),
'tools_prompt': tools_prompt,
'os':platform.system(),
'home_dir':Path.home().as_posix(),
'user':getuser(),
'resolution':f'{width}x{height}',
'max_steps': max_steps
})
@staticmethod
def action_prompt(agent_data:AgentData) -> str:
template = PromptTemplate.from_file(files('windows_use.agent.prompt').joinpath('action.md'))
return template.format(**{
'evaluate': agent_data.evaluate,
'memory': agent_data.memory,
'thought': agent_data.thought,
'action_name': agent_data.action.name,
'action_input': agent_data.action.params
})
@staticmethod
def previous_observation_prompt(observation: str)-> str:
template=PromptTemplate.from_template(dedent('''
```xml
<Observation>{observation}</Observation>
```
'''))
return template.format(**{'observation': observation})
@staticmethod
def observation_prompt(agent_step: AgentStep, tool_result:ToolResult,desktop_state: DesktopState) -> str:
cursor_position = pg.position()
tree_state = desktop_state.tree_state
template = PromptTemplate.from_file(files('windows_use.agent.prompt').joinpath('observation.md'))
return template.format(**{
'steps': agent_step.step_number,
'max_steps': agent_step.max_steps,
'observation': tool_result.content if tool_result.is_success else tool_result.error,
'active_app': desktop_state.active_app_to_string(),
'cursor_location': f'{cursor_position.x},{cursor_position.y}',
'apps': desktop_state.apps_to_string(),
'interactive_elements': tree_state.interactive_elements_to_string() or 'No interactive elements found',
'informative_elements': tree_state.informative_elements_to_string() or 'No informative elements found',
'scrollable_elements': tree_state.scrollable_elements_to_string() or 'No scrollable elements found',
})
@staticmethod
def answer_prompt(agent_data: AgentData, tool_result: ToolResult):
template = PromptTemplate.from_file(files('windows_use.agent.prompt').joinpath('answer.md'))
return template.format(**{
'evaluate': agent_data.evaluate,
'memory': agent_data.memory,
'thought': agent_data.thought,
'final_answer': tool_result.content
})
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/registry/views.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/registry/views.py | from pydantic import BaseModel
from typing import Callable
# Metadata record for one registered tool: exposed name, prompt description,
# JSON-schema-style params, and the callable that executes it.
class Tool(BaseModel):
    name:str
    description:str
    function: Callable
    params: dict
# Uniform outcome wrapper for tool execution: content on success, error text on failure.
class ToolResult(BaseModel):
    is_success: bool
    content: str | None = None
error: str | None = None | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/registry/service.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/agent/registry/service.py | from windows_use.agent.registry.views import Tool as ToolData, ToolResult
from windows_use.desktop import Desktop
from langchain.tools import Tool
from textwrap import dedent
class Registry:
def __init__(self,tools:list[Tool]):
    # Keep both the raw langchain tools and a name-indexed metadata registry.
    self.tools=tools
    self.tools_registry=self.registry()
def tool_prompt(self, tool_name: str) -> str:
    """Render one tool's name/description/parameters for the system prompt."""
    # NOTE(review): .get returns None for an unknown name, which raises
    # AttributeError below — confirm callers only pass registered names.
    tool = self.tools_registry.get(tool_name)
    return dedent(f"""
    Tool Name: {tool.name}
    Description: {tool.description}
    Parameters: {tool.params}
    """)
def registry(self):
    """Build the name → ToolData mapping from the langchain tool objects."""
    return {tool.name: ToolData(
        name=tool.name,
        description=tool.description,
        params=tool.args,
        function=tool.run
    ) for tool in self.tools}
def get_tools_prompt(self) -> str:
    """Render the prompt section listing every registered tool."""
    tools_prompt = [self.tool_prompt(tool.name) for tool in self.tools]
    # Bug fix: the join is hoisted out of the f-string — a backslash ('\n')
    # inside an f-string expression is a SyntaxError before Python 3.12.
    joined = '\n\n'.join(tools_prompt)
    return dedent(f"""
    Available Tools:
    {joined}
    """)
def execute(self, tool_name: str, desktop: Desktop, **kwargs) -> ToolResult:
    """Run a registered tool by name, packaging success or failure into ToolResult."""
    tool = self.tools_registry.get(tool_name)
    if tool is None:
        return ToolResult(is_success=False, error=f"Tool '{tool_name}' not found.")
    try:
        # langchain Tool.run receives a single dict; the desktop handle is
        # merged into the model-validated kwargs (dict union needs 3.9+).
        content = tool.function(tool_input={'desktop':desktop}|kwargs)
        return ToolResult(is_success=True, content=content)
    except Exception as error:
return ToolResult(is_success=False, error=str(error)) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/tree/views.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/tree/views.py | from dataclasses import dataclass,field
@dataclass
class TreeState:
    """Snapshot of the UI accessibility tree, bucketed by element role.

    Bug fixes: ``default_factory`` must be a callable (``list``), not a list
    instance — the old ``default_factory=[]`` raised TypeError whenever a
    TreeState was constructed with a field left to its default. The nested
    same-quote f-strings (a SyntaxError before Python 3.12) are also unrolled.
    """
    interactive_nodes: list['TreeElementNode'] = field(default_factory=list)
    informative_nodes: list['TextElementNode'] = field(default_factory=list)
    scrollable_nodes: list['ScrollElementNode'] = field(default_factory=list)

    def interactive_elements_to_string(self) -> str:
        """One line per interactive element, labelled by its list index."""
        lines = []
        for index, node in enumerate(self.interactive_nodes):
            control_type = f'{node.control_type} Control'
            lines.append(f'Label: {index} App Name: {node.app_name} ControlType: {control_type} Name: {node.name} Shortcut: {node.shortcut} Cordinates: {node.center.to_string()}')
        return '\n'.join(lines)

    def informative_elements_to_string(self) -> str:
        """One line per text element."""
        return '\n'.join([f'App Name: {node.app_name} Name: {node.name}' for node in self.informative_nodes])

    def scrollable_elements_to_string(self) -> str:
        """One line per scrollable element; labels continue after the interactive ones."""
        offset = len(self.interactive_nodes)
        lines = []
        for index, node in enumerate(self.scrollable_nodes):
            control_type = f'{node.control_type} Control'
            lines.append(f'Label: {offset + index} App Name: {node.app_name} ControlType: {control_type} Name: {node.name} Cordinates: {node.center.to_string()} Horizontal Scrollable: {node.horizontal_scrollable} Vertical Scrollable: {node.vertical_scrollable}')
        return '\n'.join(lines)
@dataclass
class BoundingBox:
    """Axis-aligned rectangle of an element, in screen coordinates."""
    left:int
    top:int
    right:int
    bottom:int

    def to_string(self):
        """Render as '(left,top,right,bottom)'."""
        corners = (self.left, self.top, self.right, self.bottom)
        return '({},{},{},{})'.format(*corners)
@dataclass
class Center:
    """Center point of an element, in screen coordinates."""
    x:int
    y:int

    def to_string(self)->str:
        """Render as '(x,y)'."""
        return '({0},{1})'.format(self.x, self.y)
# An interactive UI element discovered in the accessibility tree.
@dataclass
class TreeElementNode:
    name:str              # element's accessible name ("''" when empty)
    control_type:str      # localized control type, title-cased
    shortcut:str          # accelerator key, if any
    bounding_box:BoundingBox
    center:Center         # click target
    app_name:str          # owning application window
# A purely informative (read-only text) element in the accessibility tree.
@dataclass
class TextElementNode:
    name:str
    app_name:str
# A scrollable container element, with its supported scroll axes.
@dataclass
class ScrollElementNode:
    name:str
    control_type:str
    app_name:str
    center:Center
    horizontal_scrollable:bool
vertical_scrollable:bool | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/tree/config.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/tree/config.py | INTERACTIVE_CONTROL_TYPE_NAMES=set([
'ButtonControl','ListItemControl','MenuItemControl','DocumentControl',
'EditControl','CheckBoxControl', 'RadioButtonControl','ComboBoxControl',
'HyperlinkControl','SplitButtonControl','TabItemControl','CustomControl',
'TreeItemControl','DataItemControl','HeaderItemControl','TextBoxControl',
'ImageControl','SpinnerControl','ScrollBarControl'
])
INFORMATIVE_CONTROL_TYPE_NAMES=[
'TextControl','ImageControl'
] | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/tree/__init__.py | advanced_ai_agents/single_agent_apps/windows_use_autonomous_agent/windows_use/tree/__init__.py | from windows_use.tree.views import TreeElementNode, TextElementNode, ScrollElementNode, Center, BoundingBox, TreeState
from windows_use.tree.config import INTERACTIVE_CONTROL_TYPE_NAMES,INFORMATIVE_CONTROL_TYPE_NAMES
from concurrent.futures import ThreadPoolExecutor, as_completed
from uiautomation import GetRootControl,Control,ImageControl
from windows_use.desktop.config import AVOIDED_APPS
from PIL import Image, ImageFont, ImageDraw
from typing import TYPE_CHECKING
from time import sleep
import random
if TYPE_CHECKING:
from windows_use.desktop import Desktop
class Tree:
def __init__(self,desktop:'Desktop'):
    # Desktop provides the visibility checks used while filtering app windows.
    self.desktop=desktop
def get_state(self)->TreeState:
    """Capture the current accessibility-tree snapshot for the relevant apps."""
    sleep(0.15)  # brief settle time so the UI tree is stable before reading it
    # Get the root control of the desktop
    root=GetRootControl()
    interactive_nodes,informative_nodes,scrollable_nodes=self.get_appwise_nodes(node=root)
    return TreeState(interactive_nodes=interactive_nodes,informative_nodes=informative_nodes,scrollable_nodes=scrollable_nodes)
def get_appwise_nodes(self,node:Control) -> tuple[list[TreeElementNode],list[TextElementNode],list[ScrollElementNode]]:
    """Collect interactive/informative/scrollable nodes from the shell windows plus the foreground app.

    Bug fixes: ``visible_apps.pop('Taskbar')`` / ``.pop('Program Manager')``
    raised KeyError whenever either shell window was missing from the visible
    set; ``.pop(name, None)`` now skips absent ones. The return annotation is
    corrected to three lists (the function always returned three).
    """
    all_apps=node.GetChildren()
    visible_apps = {app.Name: app for app in all_apps if self.desktop.is_app_visible(app) and app.Name not in AVOIDED_APPS}
    # Always include the shell windows when present.
    apps = {}
    for shell_name in ('Taskbar', 'Program Manager'):
        shell_app = visible_apps.pop(shell_name, None)
        if shell_app is not None:
            apps[shell_name] = shell_app
    if visible_apps:
        # First remaining visible app is treated as the foreground app.
        foreground_app = next(iter(visible_apps.values()))
        apps[foreground_app.Name.strip()]=foreground_app
    interactive_nodes,informative_nodes,scrollable_nodes=[],[],[]
    # Parallel traversal (using ThreadPoolExecutor) to get nodes from each app
    with ThreadPoolExecutor() as executor:
        future_to_node = {executor.submit(self.get_nodes, app): app for app in apps.values()}
        for future in as_completed(future_to_node):
            try:
                result = future.result()
                if result:
                    element_nodes,text_nodes,scroll_nodes=result
                    interactive_nodes.extend(element_nodes)
                    informative_nodes.extend(text_nodes)
                    scrollable_nodes.extend(scroll_nodes)
            except Exception as e:
                print(f"Error processing node {future_to_node[future].Name}: {e}")
    return interactive_nodes,informative_nodes,scrollable_nodes
def get_nodes(self, node: Control) -> tuple[list[TreeElementNode],list[TextElementNode],list[ScrollElementNode]]:
    """Walk one app's subtree and bucket elements into interactive / text / scrollable lists."""
    interactive_nodes, informative_nodes, scrollable_nodes = [], [], []
    app_name=node.Name.strip()
    # The shell window 'Program Manager' is surfaced to the LLM as 'Desktop'.
    app_name='Desktop' if app_name=='Program Manager' else app_name
    def is_element_interactive(node:Control):
        # Interactive = whitelisted control type, visible, enabled, and not a bare image.
        try:
            if node.ControlTypeName in INTERACTIVE_CONTROL_TYPE_NAMES:
                if is_element_visible(node) and is_element_enabled(node) and not is_element_image(node):
                    return True
        except Exception as ex:
            return False
        return False
    def is_element_visible(node:Control,threshold:int=0):
        # Visible = non-empty bounding box with positive area, and on screen.
        box=node.BoundingRectangle
        if box.isempty():
            return False
        width=box.width()
        height=box.height()
        area=width*height
        # NOTE(review): despite the name, this flag is True when the element is ON screen.
        is_offscreen=not node.IsOffscreen
        return area > threshold and is_offscreen
    def is_element_enabled(node:Control):
        try:
            return node.IsEnabled
        except Exception as ex:
            return False
    def is_element_image(node:Control):
        # Unnamed images / pure graphics carry no useful interaction target.
        if isinstance(node,ImageControl):
            if not node.Name.strip() or node.LocalizedControlType=='graphic':
                return True
        return False
    def is_element_text(node:Control):
        try:
            if node.ControlTypeName in INFORMATIVE_CONTROL_TYPE_NAMES:
                if is_element_visible(node) and is_element_enabled(node) and not is_element_image(node):
                    return True
        except Exception as ex:
            return False
        return False
    def is_element_scrollable(node:Control):
        # GetScrollPattern raises when unsupported; treat that as not scrollable.
        try:
            scroll_pattern=node.GetScrollPattern()
            return scroll_pattern.VerticallyScrollable or scroll_pattern.HorizontallyScrollable
        except Exception as ex:
            return False
    def tree_traversal(node: Control):
        # Depth-first walk; each element lands in at most one bucket (first match wins).
        if is_element_interactive(node):
            box = node.BoundingRectangle
            x,y=box.xcenter(),box.ycenter()
            center = Center(x=x,y=y)
            interactive_nodes.append(TreeElementNode(
                name=node.Name.strip() or "''",
                control_type=node.LocalizedControlType.title(),
                shortcut=node.AcceleratorKey or "''",
                bounding_box=BoundingBox(left=box.left,top=box.top,right=box.right,bottom=box.bottom),
                center=center,
                app_name=app_name
            ))
        elif is_element_text(node):
            informative_nodes.append(TextElementNode(
                name=node.Name.strip() or "''",
                app_name=app_name
            ))
        elif is_element_scrollable(node):
            scroll_pattern=node.GetScrollPattern()
            box = node.BoundingRectangle
            x,y=box.xcenter(),box.ycenter()
            center = Center(x=x,y=y)
            scrollable_nodes.append(ScrollElementNode(
                name=node.Name.strip() or node.LocalizedControlType.capitalize() or "''",
                app_name=app_name,
                control_type=node.LocalizedControlType.title(),
                center=center,
                horizontal_scrollable=scroll_pattern.HorizontallyScrollable,
                vertical_scrollable=scroll_pattern.VerticallyScrollable
            ))
        # Recursively check all children
        for child in node.GetChildren():
            tree_traversal(child)
    tree_traversal(node)
    return (interactive_nodes,informative_nodes,scrollable_nodes)
def get_random_color(self):
return "#{:06x}".format(random.randint(0, 0xFFFFFF))
def annotate(self,nodes:list[TreeElementNode])->Image:
screenshot=self.desktop.get_screenshot()
# Include padding to the screenshot
padding=20
width=screenshot.width+(2*padding)
height=screenshot.height+(2*padding)
padded_screenshot=Image.new("RGB", (width, height), color=(255, 255, 255))
padded_screenshot.paste(screenshot, (padding,padding))
# Create a layout above the screenshot to place bounding boxes.
draw=ImageDraw.Draw(padded_screenshot)
font_size=12
try:
font=ImageFont.truetype('arial.ttf',font_size)
except:
font=ImageFont.load_default()
for label,node in enumerate(nodes):
box=node.bounding_box
color=self.get_random_color()
# Adjust bounding box to fit padded image
adjusted_box = (
box.left + padding, box.top + padding, # Adjust top-left corner
box.right + padding, box.bottom + padding # Adjust bottom-right corner
)
# Draw bounding box around the element in the screenshot
draw.rectangle(adjusted_box,outline=color,width=2)
# Get the size of the label
label_width=draw.textlength(str(label),font=font,font_size=font_size)
label_height=font_size
left,top,right,bottom=adjusted_box
# Position the label above the bounding box and towards the right
label_x1 = right - label_width # Align the right side of the label with the right edge of the box
label_y1 = top - label_height - 4 # Place the label just above the top of the bounding box, with some padding
# Draw the label background rectangle
label_x2 = label_x1 + label_width
label_y2 = label_y1 + label_height + 4 # Add some padding
# Draw the label background rectangle
draw.rectangle([(label_x1, label_y1), (label_x2, label_y2)], fill=color)
# Draw the label text
text_x = label_x1 + 2 # Padding for text inside the rectangle
text_y = label_y1 + 2
draw.text((text_x, text_y), str(label), fill=(255, 255, 255), font=font)
return padded_screenshot | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_consultant_agent/ai_consultant_agent.py | advanced_ai_agents/single_agent_apps/ai_consultant_agent/ai_consultant_agent.py | import logging
from typing import Dict, Any, List, Union
from dataclasses import dataclass
import base64
import requests
import os
# Google ADK imports
from google.adk.agents import LlmAgent
from google.adk.tools import google_search
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
# Define constants for the agent configuration
MODEL_ID = "gemini-2.5-flash"  # Gemini model the LlmAgent runs on
APP_NAME = "ai_consultant_agent"  # ADK application identifier (also the agent name)
USER_ID = "consultant-user"  # session identity constant
SESSION_ID = "consultant-session"  # session identity constant
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-level logger used by the tool wrapper
def sanitize_bytes_for_json(obj: Any) -> Any:
    """
    Recursively replace every ``bytes`` value with a JSON-safe string.

    UTF-8-decodable bytes become the decoded text; anything else becomes a
    base64 (ASCII) string. Dicts, lists and tuples are rebuilt with their
    contents sanitized; all other values pass through unchanged.

    Args:
        obj: Arbitrary value that may contain bytes anywhere inside it.

    Returns:
        An equivalent structure containing no bytes objects.
    """
    if isinstance(obj, bytes):
        try:
            # Prefer readable text when the payload is valid UTF-8.
            return obj.decode('utf-8')
        except UnicodeDecodeError:
            # Binary payload: fall back to a base64 string.
            return base64.b64encode(obj).decode('ascii')
    if isinstance(obj, dict):
        return {key: sanitize_bytes_for_json(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [sanitize_bytes_for_json(element) for element in obj]
    if isinstance(obj, tuple):
        return tuple(sanitize_bytes_for_json(element) for element in obj)
    return obj
def safe_tool_wrapper(tool_func):
    """
    Wrap a tool function so it never leaks raw bytes or an exception.

    Successful results are passed through sanitize_bytes_for_json; any
    exception is logged and converted into a structured error dict.

    Args:
        tool_func: The original tool function.

    Returns:
        Wrapped function that sanitizes output and preserves the
        original's metadata.
    """
    from functools import wraps  # local import keeps the module surface unchanged

    # @wraps preserves __name__/__doc__ (as before) plus __qualname__,
    # __module__, __dict__ and sets __wrapped__ for introspection.
    @wraps(tool_func)
    def wrapped_tool(*args, **kwargs):
        try:
            result = tool_func(*args, **kwargs)
            return sanitize_bytes_for_json(result)
        except Exception as e:
            logger.error(f"Error in tool {tool_func.__name__}: {e}")
            return {
                "error": f"Tool execution failed: {str(e)}",
                "tool": tool_func.__name__,
                "status": "error"
            }
    return wrapped_tool
@dataclass
class MarketInsight:
    """Structure for market research insights"""
    # Insight bucket, e.g. "Market Opportunity" or "Risk Assessment".
    category: str
    # Human-readable finding text.
    finding: str
    # Confidence score; call sites in this module use values in 0.0-1.0.
    confidence: float
    # Provenance label, e.g. "Market Research" or "Industry Report".
    source: str
def analyze_market_data(research_query: str, industry: str = "") -> Dict[str, Any]:
    """
    Produce simulated market insights for a business query.

    Keyword heuristics on the query ("startup"/"launch", "saas"/"software")
    plus an optional industry string decide which canned insights are
    emitted; a real implementation would process actual search results.

    Args:
        research_query: The business query to analyze.
        industry: Optional industry context.

    Returns:
        Dict with the query, industry, serialized insights, a summary line
        and the insight count.
    """
    lowered = research_query.lower()
    collected: List[MarketInsight] = []
    if "startup" in lowered or "launch" in lowered:
        collected += [
            MarketInsight("Market Opportunity", "Growing market with moderate competition", 0.8, "Market Research"),
            MarketInsight("Risk Assessment", "Standard startup risks apply - funding, competition", 0.7, "Analysis"),
            MarketInsight("Recommendation", "Conduct MVP testing before full launch", 0.9, "Strategic Planning"),
        ]
    if "saas" in lowered or "software" in lowered:
        collected += [
            MarketInsight("Technology Trend", "Cloud-based solutions gaining adoption", 0.9, "Tech Analysis"),
            MarketInsight("Customer Behavior", "Businesses prefer subscription models", 0.8, "Market Study"),
        ]
    if industry:
        collected.append(
            MarketInsight("Industry Specific", f"{industry} sector shows growth potential", 0.7, "Industry Report")
        )
    serialized = [
        {
            "category": item.category,
            "finding": item.finding,
            "confidence": item.confidence,
            "source": item.source,
        }
        for item in collected
    ]
    return {
        "query": research_query,
        "industry": industry,
        "insights": serialized,
        "summary": f"Analysis completed for: {research_query}",
        "total_insights": len(collected),
    }
def generate_strategic_recommendations(analysis_data: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Turn market-analysis output into prioritized strategic recommendations.

    Emits a market-entry plan when any insight finding mentions "startup",
    a technology plan when any mentions "saas", and always appends a risk
    management recommendation.

    Args:
        analysis_data: Market analysis results; expects an "insights" list
            of dicts carrying a "finding" key.

    Returns:
        List of recommendation dicts (category, priority, recommendation,
        rationale, timeline, action_items).
    """
    # Lowercase all finding texts once, then match keywords against them.
    findings = [item["finding"].lower() for item in analysis_data.get("insights", [])]
    recommendations: List[Dict[str, Any]] = []
    if any("startup" in text for text in findings):
        recommendations.append({
            "category": "Market Entry Strategy",
            "priority": "High",
            "recommendation": "Implement phased market entry with MVP testing",
            "rationale": "Reduces risk and validates market fit before major investment",
            "timeline": "3-6 months",
            "action_items": [
                "Develop minimum viable product",
                "Identify target customer segment",
                "Conduct market validation tests"
            ]
        })
    if any("saas" in text for text in findings):
        recommendations.append({
            "category": "Technology Strategy",
            "priority": "Medium",
            "recommendation": "Focus on cloud-native architecture and subscription model",
            "rationale": "Aligns with market trends and customer preferences",
            "timeline": "2-4 months",
            "action_items": [
                "Design scalable cloud infrastructure",
                "Implement subscription billing system",
                "Plan for multi-tenant architecture"
            ]
        })
    # Risk management is always included regardless of findings.
    recommendations.append({
        "category": "Risk Management",
        "priority": "High",
        "recommendation": "Establish comprehensive risk monitoring framework",
        "rationale": "Proactive risk management is essential for business success",
        "timeline": "1-2 months",
        "action_items": [
            "Identify key business risks",
            "Develop mitigation strategies",
            "Implement monitoring systems"
        ]
    })
    return recommendations
def perplexity_search(query: str, system_prompt: str = "Be precise and concise. Focus on business insights and market data.") -> Dict[str, Any]:
    """Search the web using Perplexity AI for real-time information and insights.

    Returns a structured dict: on success it carries the answer content,
    citations and usage metadata; on any failure (missing key, HTTP error,
    empty response) it carries "status": "error" instead of raising.
    """
    try:
        api_key = os.getenv("PERPLEXITY_API_KEY")
        if not api_key:
            # No credentials: report a structured error rather than raising.
            return {"error": "Perplexity API key not found. Please set PERPLEXITY_API_KEY environment variable.", "query": query, "status": "error"}
        payload = {
            "model": "sonar",
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": query},
            ],
        }
        headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
        response = requests.post(
            "https://api.perplexity.ai/chat/completions",
            json=payload,
            headers=headers,
            timeout=30,
        )
        response.raise_for_status()
        result = response.json()
        if "choices" in result and result["choices"]:
            answer = result["choices"][0]["message"]["content"]
            return {
                "query": query,
                "content": answer,
                "citations": result.get("citations", []),
                "search_results": result.get("search_results", []),
                "status": "success",
                "source": "Perplexity AI",
                "model": result.get("model", "sonar"),
                "usage": result.get("usage", {}),
                "response_id": result.get("id", ""),
                "created": result.get("created", 0),
            }
        return {"error": "No response content found", "query": query, "status": "error", "raw_response": result}
    except Exception as e:
        return {"error": f"Error: {str(e)}", "query": query, "status": "error"}
# Define the consultant tools with safety wrappers.
# Each wrapper sanitizes bytes out of results and converts exceptions into
# structured error dicts, so the agent always receives JSON-safe output.
consultant_tools = [
    safe_tool_wrapper(analyze_market_data),
    safe_tool_wrapper(generate_strategic_recommendations),
    safe_tool_wrapper(perplexity_search)
]
INSTRUCTIONS = """You are a senior AI business consultant specializing in market analysis and strategic planning.
Your expertise includes:
- Business strategy development and recommendations
- Risk assessment and mitigation planning
- Implementation planning with timelines
- Market analysis using your knowledge and available tools
- Real-time web research using Perplexity AI search capabilities
When consulting with clients:
1. Use Perplexity search to gather current market data, competitor information, and industry trends from the web
2. Use the market analysis tool to process business queries and generate insights
3. Use the strategic recommendations tool to create actionable business advice
4. Provide clear, specific recommendations with implementation timelines
5. Focus on practical solutions that drive measurable business outcomes
**Core Responsibilities:**
- Conduct real-time web research using Perplexity AI for current market data and trends
- Analyze competitive landscapes and market opportunities using search results and your knowledge
- Provide strategic guidance with clear action items based on up-to-date information
- Assess risks and suggest mitigation strategies using current market conditions
- Create implementation roadmaps with realistic timelines
- Generate comprehensive business insights combining web research with analysis tools
**Critical Rules:**
- Always search for current market data, trends, and competitor information when relevant using Perplexity search
- Base recommendations on sound business principles, current market insights, and real-time web data
- Provide specific, actionable advice rather than generic guidance
- Include timelines and success metrics in recommendations
- Prioritize recommendations by business impact and feasibility
- Use Perplexity search to validate assumptions and gather supporting evidence with citations
- Combine search results with your analysis tools for comprehensive consultation
**Search Strategy:**
- Use Perplexity search for competitor analysis, market size, industry trends, and regulatory changes
- Look up recent news, funding rounds, and market developments in relevant sectors
- Verify market assumptions with current web data before making recommendations
- Research best practices and case studies from similar businesses
- Always include citations and sources when referencing search results
Always maintain a professional, analytical approach while being results-oriented.
Use all available tools including Perplexity search to provide comprehensive, well-researched consultation backed by current web data and citations."""
# Define the agent instance
root_agent = LlmAgent(
    model=MODEL_ID,
    name=APP_NAME,
    description="An AI business consultant that provides market research, strategic analysis, and actionable recommendations.",
    instruction=INSTRUCTIONS,
    tools=consultant_tools,
    output_key="consultation_response"  # presumably the key ADK stores the reply under — verify against ADK docs
)
# Setup Runner and Session Service.
# In-memory sessions: conversation state is lost when the process exits.
session_service = InMemorySessionService()
runner = Runner(
    agent=root_agent,
    app_name=APP_NAME,
    session_service=session_service
)
# Script entry point: prints usage guidance only; the agent itself is
# served through the ADK web interface (`adk web .`).
if __name__ == "__main__":
    print("🤖 AI Consultant Agent with Google ADK")
    print("=====================================")
    print()
    print("This agent provides comprehensive business consultation including:")
    print("• Market research and analysis")
    print("• Strategic recommendations")
    print("• Implementation planning")
    print("• Risk assessment")
    print()
    print("To use this agent:")
    print("1. Run: adk web .")
    print("2. Open the web interface")
    print("3. Select 'AI Business Consultant' agent")
    print("4. Start your consultation")
    print()
    print("Example queries:")
    print('• "I want to launch a SaaS startup for small businesses"')
    print('• "Should I expand my retail business to e-commerce?"')
    print('• "What are the market opportunities in the healthcare tech space?"')
    print()
    print("📊 Use the Eval tab in ADK web to save and evaluate consultation sessions!")
    print()
    # Confirm module-level wiring succeeded.
    print(f"✅ Agent '{APP_NAME}' initialized successfully!")
    print(f" Model: {MODEL_ID}")
    print(f" Tools: {len(consultant_tools)} available")
    print(f" Session Service: {type(session_service).__name__}")
    print(f" Runner: {type(runner).__name__}")
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_consultant_agent/__init__.py | advanced_ai_agents/single_agent_apps/ai_consultant_agent/__init__.py |
from .ai_consultant_agent import root_agent, session_service, runner, APP_NAME
from . import agent
__all__ = ['root_agent', 'session_service', 'runner', 'APP_NAME', 'agent'] | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_consultant_agent/agent.py | advanced_ai_agents/single_agent_apps/ai_consultant_agent/agent.py |
from .ai_consultant_agent import root_agent
# Export for ADK CLI discovery
__all__ = ['root_agent'] | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_deep_research_agent/deep_research_openai.py | advanced_ai_agents/single_agent_apps/ai_deep_research_agent/deep_research_openai.py | import asyncio
import streamlit as st
from typing import Dict, Any, List
from agents import Agent, Runner, trace
from agents import set_default_openai_key
from firecrawl import FirecrawlApp
from agents.tool import function_tool
# Set page configuration
st.set_page_config(
page_title="OpenAI Deep Research Agent",
page_icon="📘",
layout="wide"
)
# Initialize session state for API keys if not exists
if "openai_api_key" not in st.session_state:
st.session_state.openai_api_key = ""
if "firecrawl_api_key" not in st.session_state:
st.session_state.firecrawl_api_key = ""
# Sidebar for API keys
with st.sidebar:
st.title("API Configuration")
openai_api_key = st.text_input(
"OpenAI API Key",
value=st.session_state.openai_api_key,
type="password"
)
firecrawl_api_key = st.text_input(
"Firecrawl API Key",
value=st.session_state.firecrawl_api_key,
type="password"
)
if openai_api_key:
st.session_state.openai_api_key = openai_api_key
set_default_openai_key(openai_api_key)
if firecrawl_api_key:
st.session_state.firecrawl_api_key = firecrawl_api_key
# Main content
st.title("📘 OpenAI Deep Research Agent")
st.markdown("This OpenAI Agent from the OpenAI Agents SDK performs deep research on any topic using Firecrawl")
# Research topic input
research_topic = st.text_input("Enter your research topic:", placeholder="e.g., Latest developments in AI")
# Keep the original deep_research tool
@function_tool
async def deep_research(query: str, max_depth: int, time_limit: int, max_urls: int) -> Dict[str, Any]:
    """
    Perform comprehensive web research using Firecrawl's deep research endpoint.

    Args:
        query: Research question to investigate.
        max_depth: Research depth passed to Firecrawl (agent is instructed to use 3).
        time_limit: Time budget in seconds (agent is instructed to use 180).
        max_urls: Maximum URLs to analyze (agent is instructed to use 10).

    Returns:
        On success: dict with "success": True, the final analysis text,
        source count and source list. On failure: {"success": False, "error": ...}.
    """
    try:
        # Initialize FirecrawlApp with the API key from session state
        firecrawl_app = FirecrawlApp(api_key=st.session_state.firecrawl_api_key)
        # Define research parameters (Firecrawl expects camelCase keys)
        params = {
            "maxDepth": max_depth,
            "timeLimit": time_limit,
            "maxUrls": max_urls
        }
        # Set up a callback that streams progress events into the Streamlit page
        def on_activity(activity):
            st.write(f"[{activity['type']}] {activity['message']}")
        # Run deep research
        with st.spinner("Performing deep research..."):
            results = firecrawl_app.deep_research(
                query=query,
                params=params,
                on_activity=on_activity
            )
        return {
            "success": True,
            "final_analysis": results['data']['finalAnalysis'],
            "sources_count": len(results['data']['sources']),
            "sources": results['data']['sources']
        }
    except Exception as e:
        # Surface the failure in the UI and return a structured error.
        st.error(f"Deep research error: {str(e)}")
        return {"error": str(e), "success": False}
# Keep the original agents
research_agent = Agent(
name="research_agent",
instructions="""You are a research assistant that can perform deep web research on any topic.
When given a research topic or question:
1. Use the deep_research tool to gather comprehensive information
- Always use these parameters:
* max_depth: 3 (for moderate depth)
* time_limit: 180 (3 minutes)
* max_urls: 10 (sufficient sources)
2. The tool will search the web, analyze multiple sources, and provide a synthesis
3. Review the research results and organize them into a well-structured report
4. Include proper citations for all sources
5. Highlight key findings and insights
""",
tools=[deep_research]
)
elaboration_agent = Agent(
name="elaboration_agent",
instructions="""You are an expert content enhancer specializing in research elaboration.
When given a research report:
1. Analyze the structure and content of the report
2. Enhance the report by:
- Adding more detailed explanations of complex concepts
- Including relevant examples, case studies, and real-world applications
- Expanding on key points with additional context and nuance
- Adding visual elements descriptions (charts, diagrams, infographics)
- Incorporating latest trends and future predictions
- Suggesting practical implications for different stakeholders
3. Maintain academic rigor and factual accuracy
4. Preserve the original structure while making it more comprehensive
5. Ensure all additions are relevant and valuable to the topic
"""
)
async def run_research_process(topic: str):
    """Run the complete research process.

    Two stages: (1) the research agent gathers sources and drafts an initial
    report, (2) the elaboration agent enhances it. The initial report is
    shown in an expander; the enhanced report text is returned.
    """
    # Step 1: Initial Research
    with st.spinner("Conducting initial research..."):
        research_result = await Runner.run(research_agent, topic)
        initial_report = research_result.final_output
    # Display initial report in an expander
    with st.expander("View Initial Research Report"):
        st.markdown(initial_report)
    # Step 2: Enhance the report
    with st.spinner("Enhancing the report with additional information..."):
        elaboration_input = f"""
        RESEARCH TOPIC: {topic}
        INITIAL RESEARCH REPORT:
        {initial_report}
        Please enhance this research report with additional information, examples, case studies,
        and deeper insights while maintaining its academic rigor and factual accuracy.
        """
        elaboration_result = await Runner.run(elaboration_agent, elaboration_input)
        enhanced_report = elaboration_result.final_output
    return enhanced_report
# Main research process.
# The button is disabled until both keys and a topic are present; the inner
# checks are a second line of defense for stale reruns.
if st.button("Start Research", disabled=not (openai_api_key and firecrawl_api_key and research_topic)):
    if not openai_api_key or not firecrawl_api_key:
        st.warning("Please enter both API keys in the sidebar.")
    elif not research_topic:
        st.warning("Please enter a research topic.")
    else:
        try:
            # Create placeholder for the final report
            report_placeholder = st.empty()
            # Run the research process; Streamlit scripts are synchronous,
            # so the async pipeline is driven to completion with asyncio.run.
            enhanced_report = asyncio.run(run_research_process(research_topic))
            # Display the enhanced report
            # NOTE(review): st.empty() holds a single element, so the second
            # .markdown call replaces the "## Enhanced Research Report"
            # heading rather than appending below it — confirm intended.
            report_placeholder.markdown("## Enhanced Research Report")
            report_placeholder.markdown(enhanced_report)
            # Add download button
            st.download_button(
                "Download Report",
                enhanced_report,
                file_name=f"{research_topic.replace(' ', '_')}_report.md",
                mime="text/markdown"
            )
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
# Footer
st.markdown("---")
st.markdown("Powered by OpenAI Agents SDK and Firecrawl")
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_personal_finance_agent/finance_agent.py | advanced_ai_agents/single_agent_apps/ai_personal_finance_agent/finance_agent.py | from textwrap import dedent
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.tools.serpapi import SerpApiTools
import streamlit as st
from agno.models.openai import OpenAIChat
# Set up the Streamlit app
st.title("AI Personal Finance Planner 💰")
st.caption("Manage your finances with AI Personal Finance Manager by creating personalized budgets, investment plans, and savings strategies using GPT-4o")
# Get OpenAI API key from user
openai_api_key = st.text_input("Enter OpenAI API Key to access GPT-4o", type="password")
# Get SerpAPI key from the user
serp_api_key = st.text_input("Enter Serp API Key for Search functionality", type="password")
if openai_api_key and serp_api_key:
researcher = Agent(
name="Researcher",
role="Searches for financial advice, investment opportunities, and savings strategies based on user preferences",
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
description=dedent(
"""\
You are a world-class financial researcher. Given a user's financial goals and current financial situation,
generate a list of search terms for finding relevant financial advice, investment opportunities, and savings strategies.
Then search the web for each term, analyze the results, and return the 10 most relevant results.
"""
),
instructions=[
"Given a user's financial goals and current financial situation, first generate a list of 3 search terms related to those goals.",
"For each search term, `search_google` and analyze the results.",
"From the results of all searches, return the 10 most relevant results to the user's preferences.",
"Remember: the quality of the results is important.",
],
tools=[SerpApiTools(api_key=serp_api_key)],
add_datetime_to_context=True,
)
planner = Agent(
name="Planner",
role="Generates a personalized financial plan based on user preferences and research results",
model=OpenAIChat(id="gpt-4o", api_key=openai_api_key),
description=dedent(
"""\
You are a senior financial planner. Given a user's financial goals, current financial situation, and a list of research results,
your goal is to generate a personalized financial plan that meets the user's needs and preferences.
"""
),
instructions=[
"Given a user's financial goals, current financial situation, and a list of research results, generate a personalized financial plan that includes suggested budgets, investment plans, and savings strategies.",
"Ensure the plan is well-structured, informative, and engaging.",
"Ensure you provide a nuanced and balanced plan, quoting facts where possible.",
"Remember: the quality of the plan is important.",
"Focus on clarity, coherence, and overall quality.",
"Never make up facts or plagiarize. Always provide proper attribution.",
],
add_datetime_to_context=True,
)
    # Input fields for the user's financial goals and current financial situation
    financial_goals = st.text_input("What are your financial goals?")
    current_situation = st.text_area("Describe your current financial situation")
    if st.button("Generate Financial Plan"):
        with st.spinner("Processing..."):
            # Get the response from the assistant.
            # NOTE(review): the `researcher` agent defined above is never
            # invoked — the plan is generated from user input alone, without
            # search results; confirm whether research was meant to feed in.
            response: RunOutput = planner.run(f"Financial goals: {financial_goals}, Current situation: {current_situation}", stream=False)
            st.write(response.content)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/advanced_ai_agents/single_agent_apps/ai_health_fitness_agent/health_agent.py | advanced_ai_agents/single_agent_apps/ai_health_fitness_agent/health_agent.py | import streamlit as st
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.models.google import Gemini
st.set_page_config(
page_title="AI Health & Fitness Planner",
page_icon="🏋️♂️",
layout="wide",
initial_sidebar_state="expanded"
)
st.markdown("""
<style>
.main {
padding: 2rem;
}
.stButton>button {
width: 100%;
border-radius: 5px;
height: 3em;
}
.success-box {
padding: 1rem;
border-radius: 0.5rem;
background-color: #f0fff4;
border: 1px solid #9ae6b4;
}
.warning-box {
padding: 1rem;
border-radius: 0.5rem;
background-color: #fffaf0;
border: 1px solid #fbd38d;
}
div[data-testid="stExpander"] div[role="button"] p {
font-size: 1.1rem;
font-weight: 600;
}
</style>
""", unsafe_allow_html=True)
def display_dietary_plan(plan_content):
    """Render the dietary plan dict in an expander: rationale and meals on
    the wide left column, considerations as warnings on the right."""
    with st.expander("📋 Your Personalized Dietary Plan", expanded=True):
        main_col, side_col = st.columns([2, 1])
        with main_col:
            st.markdown("### 🎯 Why this plan works")
            st.info(plan_content.get("why_this_plan_works", "Information not available"))
            st.markdown("### 🍽️ Meal Plan")
            st.write(plan_content.get("meal_plan", "Plan not available"))
        with side_col:
            st.markdown("### ⚠️ Important Considerations")
            # One warning box per non-blank line of the considerations text.
            for consideration in plan_content.get("important_considerations", "").split('\n'):
                if consideration.strip():
                    st.warning(consideration)
def display_fitness_plan(plan_content):
    """Render the fitness plan dict in an expander: goals and routine on the
    wide left column, tips as info boxes on the right."""
    with st.expander("💪 Your Personalized Fitness Plan", expanded=True):
        main_col, side_col = st.columns([2, 1])
        with main_col:
            st.markdown("### 🎯 Goals")
            st.success(plan_content.get("goals", "Goals not specified"))
            st.markdown("### 🏋️♂️ Exercise Routine")
            st.write(plan_content.get("routine", "Routine not available"))
        with side_col:
            st.markdown("### 💡 Pro Tips")
            # One info box per non-blank line of the tips text.
            for tip in plan_content.get("tips", "").split('\n'):
                if tip.strip():
                    st.info(tip)
def main():
    """Streamlit entry point: collect API key and profile, generate dietary
    and fitness plans via Gemini agents, then answer follow-up questions."""
    # One-time session-state initialization (first script run in this session).
    if 'dietary_plan' not in st.session_state:
        st.session_state.dietary_plan = {}
        st.session_state.fitness_plan = {}
        st.session_state.qa_pairs = []
        st.session_state.plans_generated = False
    st.title("🏋️♂️ AI Health & Fitness Planner")
    st.markdown("""
        <div style='background-color: #00008B; padding: 1rem; border-radius: 0.5rem; margin-bottom: 2rem;'>
        Get personalized dietary and fitness plans tailored to your goals and preferences.
        Our AI-powered system considers your unique profile to create the perfect plan for you.
        </div>
    """, unsafe_allow_html=True)
    # Sidebar gates the whole app on a Gemini API key.
    with st.sidebar:
        st.header("🔑 API Configuration")
        gemini_api_key = st.text_input(
            "Gemini API Key",
            type="password",
            help="Enter your Gemini API key to access the service"
        )
        if not gemini_api_key:
            st.warning("⚠️ Please enter your Gemini API Key to proceed")
            st.markdown("[Get your API key here](https://aistudio.google.com/apikey)")
            # Abort rendering until a key is supplied.
            return
        st.success("API Key accepted!")
    if gemini_api_key:
        try:
            gemini_model = Gemini(id="gemini-2.5-flash-preview-05-20", api_key=gemini_api_key)
        except Exception as e:
            st.error(f"❌ Error initializing Gemini model: {e}")
            return
        # Profile form, split into two columns.
        st.header("👤 Your Profile")
        col1, col2 = st.columns(2)
        with col1:
            age = st.number_input("Age", min_value=10, max_value=100, step=1, help="Enter your age")
            height = st.number_input("Height (cm)", min_value=100.0, max_value=250.0, step=0.1)
            activity_level = st.selectbox(
                "Activity Level",
                options=["Sedentary", "Lightly Active", "Moderately Active", "Very Active", "Extremely Active"],
                help="Choose your typical activity level"
            )
            dietary_preferences = st.selectbox(
                "Dietary Preferences",
                options=["Vegetarian", "Keto", "Gluten Free", "Low Carb", "Dairy Free"],
                help="Select your dietary preference"
            )
        with col2:
            weight = st.number_input("Weight (kg)", min_value=20.0, max_value=300.0, step=0.1)
            sex = st.selectbox("Sex", options=["Male", "Female", "Other"])
            fitness_goals = st.selectbox(
                "Fitness Goals",
                options=["Lose Weight", "Gain Muscle", "Endurance", "Stay Fit", "Strength Training"],
                help="What do you want to achieve?"
            )
        if st.button("🎯 Generate My Personalized Plan", use_container_width=True):
            with st.spinner("Creating your perfect health and fitness routine..."):
                try:
                    # Two single-purpose agents share the same Gemini model.
                    dietary_agent = Agent(
                        name="Dietary Expert",
                        role="Provides personalized dietary recommendations",
                        model=gemini_model,
                        instructions=[
                            "Consider the user's input, including dietary restrictions and preferences.",
                            "Suggest a detailed meal plan for the day, including breakfast, lunch, dinner, and snacks.",
                            "Provide a brief explanation of why the plan is suited to the user's goals.",
                            "Focus on clarity, coherence, and quality of the recommendations.",
                        ]
                    )
                    fitness_agent = Agent(
                        name="Fitness Expert",
                        role="Provides personalized fitness recommendations",
                        model=gemini_model,
                        instructions=[
                            "Provide exercises tailored to the user's goals.",
                            "Include warm-up, main workout, and cool-down exercises.",
                            "Explain the benefits of each recommended exercise.",
                            "Ensure the plan is actionable and detailed.",
                        ]
                    )
                    # Plain-text profile is the prompt for both agents.
                    user_profile = f"""
                    Age: {age}
                    Weight: {weight}kg
                    Height: {height}cm
                    Sex: {sex}
                    Activity Level: {activity_level}
                    Dietary Preferences: {dietary_preferences}
                    Fitness Goals: {fitness_goals}
                    """
                    dietary_plan_response: RunOutput = dietary_agent.run(user_profile)
                    # Static framing text wraps the model-generated meal plan.
                    dietary_plan = {
                        "why_this_plan_works": "High Protein, Healthy Fats, Moderate Carbohydrates, and Caloric Balance",
                        "meal_plan": dietary_plan_response.content,
                        "important_considerations": """
                        - Hydration: Drink plenty of water throughout the day
                        - Electrolytes: Monitor sodium, potassium, and magnesium levels
                        - Fiber: Ensure adequate intake through vegetables and fruits
                        - Listen to your body: Adjust portion sizes as needed
                        """
                    }
                    fitness_plan_response: RunOutput = fitness_agent.run(user_profile)
                    fitness_plan = {
                        "goals": "Build strength, improve endurance, and maintain overall fitness",
                        "routine": fitness_plan_response.content,
                        "tips": """
                        - Track your progress regularly
                        - Allow proper rest between workouts
                        - Focus on proper form
                        - Stay consistent with your routine
                        """
                    }
                    # Persist plans so the Q&A section works across reruns;
                    # new plans reset any previous Q&A history.
                    st.session_state.dietary_plan = dietary_plan
                    st.session_state.fitness_plan = fitness_plan
                    st.session_state.plans_generated = True
                    st.session_state.qa_pairs = []
                    display_dietary_plan(dietary_plan)
                    display_fitness_plan(fitness_plan)
                except Exception as e:
                    st.error(f"❌ An error occurred: {e}")
        # Follow-up Q&A, only available once plans exist.
        if st.session_state.plans_generated:
            st.header("❓ Questions about your plan?")
            question_input = st.text_input("What would you like to know?")
            if st.button("Get Answer"):
                if question_input:
                    with st.spinner("Finding the best answer for you..."):
                        dietary_plan = st.session_state.dietary_plan
                        fitness_plan = st.session_state.fitness_plan
                        # Both plans are fed as context so answers stay grounded in them.
                        context = f"Dietary Plan: {dietary_plan.get('meal_plan', '')}\n\nFitness Plan: {fitness_plan.get('routine', '')}"
                        full_context = f"{context}\nUser Question: {question_input}"
                        try:
                            agent = Agent(model=gemini_model, debug_mode=True, markdown=True)
                            run_response: RunOutput = agent.run(full_context)
                            if hasattr(run_response, 'content'):
                                answer = run_response.content
                            else:
                                answer = "Sorry, I couldn't generate a response at this time."
                            st.session_state.qa_pairs.append((question_input, answer))
                        except Exception as e:
                            st.error(f"❌ An error occurred while getting the answer: {e}")
            if st.session_state.qa_pairs:
                st.header("💬 Q&A History")
                for question, answer in st.session_state.qa_pairs:
                    st.markdown(f"**Q:** {question}")
                    st.markdown(f"**A:** {answer}")
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/deepseek_local_rag_agent/deepseek_rag_agent.py | rag_tutorials/deepseek_local_rag_agent/deepseek_rag_agent.py | import os
import tempfile
from datetime import datetime
from typing import List
import streamlit as st
import bs4
from agno.agent import Agent
from agno.models.ollama import Ollama
from langchain_community.document_loaders import PyPDFLoader, WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from langchain_core.embeddings import Embeddings
from agno.tools.exa import ExaTools
from agno.embedder.ollama import OllamaEmbedder
class OllamaEmbedderr(Embeddings):
    """LangChain-compatible adapter that delegates embedding to a local Ollama model."""

    def __init__(self, model_name="snowflake-arctic-embed"):
        """Create the adapter.

        Args:
            model_name (str): The Ollama embedding model to delegate to.
        """
        # Agno's OllamaEmbedder performs the actual embedding; 1024 matches
        # the output dimensionality of snowflake-arctic-embed.
        self.embedder = OllamaEmbedder(id=model_name, dimensions=1024)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed every entry of *texts*, one query-style embedding per text."""
        vectors: List[List[float]] = []
        for item in texts:
            vectors.append(self.embed_query(item))
        return vectors

    def embed_query(self, text: str) -> List[float]:
        """Return the embedding vector for a single piece of text."""
        return self.embedder.get_embedding(text)
# Constants
COLLECTION_NAME = "test-deepseek-r1"
# Streamlit App Initialization
st.title("🐋 Deepseek Local RAG Reasoning Agent")
# Session State Initialization
if 'google_api_key' not in st.session_state:
st.session_state.google_api_key = ""
if 'qdrant_api_key' not in st.session_state:
st.session_state.qdrant_api_key = ""
if 'qdrant_url' not in st.session_state:
st.session_state.qdrant_url = ""
if 'model_version' not in st.session_state:
st.session_state.model_version = "deepseek-r1:1.5b" # Default to lighter model
if 'vector_store' not in st.session_state:
st.session_state.vector_store = None
if 'processed_documents' not in st.session_state:
st.session_state.processed_documents = []
if 'history' not in st.session_state:
st.session_state.history = []
if 'exa_api_key' not in st.session_state:
st.session_state.exa_api_key = ""
if 'use_web_search' not in st.session_state:
st.session_state.use_web_search = False
if 'force_web_search' not in st.session_state:
st.session_state.force_web_search = False
if 'similarity_threshold' not in st.session_state:
st.session_state.similarity_threshold = 0.7
if 'rag_enabled' not in st.session_state:
st.session_state.rag_enabled = True # RAG is enabled by default
# Sidebar Configuration
st.sidebar.header("🤖 Agent Configuration")
# Model Selection
st.sidebar.header("📦 Model Selection")
model_help = """
- 1.5b: Lighter model, suitable for most laptops
- 7b: More capable but requires better GPU/RAM
Choose based on your hardware capabilities.
"""
st.session_state.model_version = st.sidebar.radio(
"Select Model Version",
options=["deepseek-r1:1.5b", "deepseek-r1:7b"],
help=model_help
)
st.sidebar.info("Run ollama pull deepseek-r1:7b or deepseek-r1:1.5b respectively")
# RAG Mode Toggle
st.sidebar.header("🔍 RAG Configuration")
st.session_state.rag_enabled = st.sidebar.toggle("Enable RAG Mode", value=st.session_state.rag_enabled)
# Clear Chat Button
if st.sidebar.button("🗑️ Clear Chat History"):
st.session_state.history = []
st.rerun()
# Show API Configuration only if RAG is enabled
if st.session_state.rag_enabled:
st.sidebar.header("🔑 API Configuration")
qdrant_api_key = st.sidebar.text_input("Qdrant API Key", type="password", value=st.session_state.qdrant_api_key)
qdrant_url = st.sidebar.text_input("Qdrant URL",
placeholder="https://your-cluster.cloud.qdrant.io:6333",
value=st.session_state.qdrant_url)
# Update session state
st.session_state.qdrant_api_key = qdrant_api_key
st.session_state.qdrant_url = qdrant_url
# Search Configuration (only shown in RAG mode)
st.sidebar.header("🎯 Search Configuration")
st.session_state.similarity_threshold = st.sidebar.slider(
"Document Similarity Threshold",
min_value=0.0,
max_value=1.0,
value=0.7,
help="Lower values will return more documents but might be less relevant. Higher values are more strict."
)
# Add in the sidebar configuration section, after the existing API inputs
st.sidebar.header("🌐 Web Search Configuration")
st.session_state.use_web_search = st.sidebar.checkbox("Enable Web Search Fallback", value=st.session_state.use_web_search)
if st.session_state.use_web_search:
exa_api_key = st.sidebar.text_input(
"Exa AI API Key",
type="password",
value=st.session_state.exa_api_key,
help="Required for web search fallback when no relevant documents are found"
)
st.session_state.exa_api_key = exa_api_key
# Optional domain filtering
default_domains = ["arxiv.org", "wikipedia.org", "github.com", "medium.com"]
custom_domains = st.sidebar.text_input(
"Custom domains (comma-separated)",
value=",".join(default_domains),
help="Enter domains to search from, e.g.: arxiv.org,wikipedia.org"
)
search_domains = [d.strip() for d in custom_domains.split(",") if d.strip()]
# Search Configuration moved inside RAG mode check
# Utility Functions
def init_qdrant() -> QdrantClient | None:
    """Build a Qdrant client from credentials stored in Streamlit session state.

    Returns:
        QdrantClient: A ready client when both URL and API key are configured
            and construction succeeds.
        None: When configuration is incomplete or construction fails.
    """
    api_key = st.session_state.qdrant_api_key
    url = st.session_state.qdrant_url
    # Both pieces of configuration are required before attempting a connection.
    if not api_key or not url:
        return None
    try:
        client = QdrantClient(url=url, api_key=api_key, timeout=60)
    except Exception as e:
        st.error(f"🔴 Qdrant connection failed: {str(e)}")
        return None
    return client
# Document Processing Functions
def process_pdf(file) -> List:
    """Split an uploaded PDF into chunked documents tagged with source metadata.

    Args:
        file: Streamlit UploadedFile holding the PDF bytes; must expose
            ``getvalue()`` and ``name``.

    Returns:
        List: Chunked langchain documents, or ``[]`` when processing fails.
    """
    tmp_path = None
    try:
        # PyPDFLoader needs a real filesystem path, so spill the upload to a
        # temporary file first (closed before loading for Windows safety).
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
            tmp_file.write(file.getvalue())
            tmp_path = tmp_file.name
        loader = PyPDFLoader(tmp_path)
        documents = loader.load()
        # Tag every page so answers can cite which upload they came from.
        for doc in documents:
            doc.metadata.update({
                "source_type": "pdf",
                "file_name": file.name,
                "timestamp": datetime.now().isoformat()
            })
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        return text_splitter.split_documents(documents)
    except Exception as e:
        st.error(f"📄 PDF processing error: {str(e)}")
        return []
    finally:
        # BUG FIX: the temp file was created with delete=False and never
        # removed, leaking one file per processed PDF. Clean it up here.
        if tmp_path is not None:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
def process_web(url: str) -> List:
    """Fetch a web page, extract its main content, and split it into chunks.

    Args:
        url: Address of the page to ingest.

    Returns:
        List: Chunked langchain documents, or ``[]`` when processing fails.
    """
    try:
        # Restrict parsing to content-bearing containers to skip nav/boilerplate.
        strainer = bs4.SoupStrainer(
            class_=("post-content", "post-title", "post-header", "content", "main")
        )
        loader = WebBaseLoader(
            web_paths=(url,),
            bs_kwargs=dict(parse_only=strainer),
        )
        documents = loader.load()
        # Record provenance on every document before chunking.
        for doc in documents:
            doc.metadata.update({
                "source_type": "url",
                "url": url,
                "timestamp": datetime.now().isoformat()
            })
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        return splitter.split_documents(documents)
    except Exception as e:
        st.error(f"🌐 Web processing error: {str(e)}")
        return []
# Vector Store Management
def create_vector_store(client, texts):
    """Ensure the Qdrant collection exists, then index *texts* into it.

    Args:
        client: Connected QdrantClient.
        texts: Chunked langchain documents to upload.

    Returns:
        QdrantVectorStore on success, None on failure.
    """
    try:
        # Create the collection; tolerate reruns where it already exists.
        try:
            client.create_collection(
                collection_name=COLLECTION_NAME,
                vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
            )
            st.success(f"📚 Created new collection: {COLLECTION_NAME}")
        except Exception as e:
            if "already exists" not in str(e).lower():
                raise e
        store = QdrantVectorStore(
            client=client,
            collection_name=COLLECTION_NAME,
            embedding=OllamaEmbedderr(),
        )
        # Upload the chunks; the spinner gives feedback during the network I/O.
        with st.spinner('📤 Uploading documents to Qdrant...'):
            store.add_documents(texts)
            st.success("✅ Documents stored successfully!")
            return store
    except Exception as e:
        st.error(f"🔴 Vector store error: {str(e)}")
        return None
def get_web_search_agent() -> Agent:
    """Build the agent used for Exa-backed web search fallback."""
    # Configure the Exa tool from the sidebar settings before agent creation.
    exa_tool = ExaTools(
        api_key=st.session_state.exa_api_key,
        include_domains=search_domains,
        num_results=5,
    )
    return Agent(
        name="Web Search Agent",
        model=Ollama(id="llama3.2"),
        tools=[exa_tool],
        instructions="""You are a web search expert. Your task is to:
        1. Search the web for relevant information about the query
        2. Compile and summarize the most relevant information
        3. Include sources in your response
        """,
        show_tool_calls=True,
        markdown=True,
    )
def get_rag_agent() -> Agent:
    """Build the primary answering agent using the user-selected Ollama model."""
    # The model id tracks the sidebar radio selection (deepseek-r1 1.5b vs 7b).
    selected_model = Ollama(id=st.session_state.model_version)
    return Agent(
        name="DeepSeek RAG Agent",
        model=selected_model,
        instructions="""You are an Intelligent Agent specializing in providing accurate answers.
        When asked a question:
        - Analyze the question and answer the question with what you know.
        When given context from documents:
        - Focus on information from the provided documents
        - Be precise and cite specific details
        When given web search results:
        - Clearly indicate that the information comes from web search
        - Synthesize the information clearly
        Always maintain high accuracy and clarity in your responses.
        """,
        show_tool_calls=True,
        markdown=True,
    )
def check_document_relevance(query: str, vector_store, threshold: float = 0.7) -> tuple[bool, List]:
    """Probe the vector store for documents similar to *query*.

    Args:
        query: Natural-language question to search for.
        vector_store: The QdrantVectorStore, or a falsy value when nothing
            has been indexed yet.
        threshold: Minimum similarity score a hit must reach.

    Returns:
        tuple[bool, List]: Whether any document cleared the threshold, and
        the matching documents themselves.
    """
    # No store means no documents can possibly match.
    if not vector_store:
        return False, []
    search = vector_store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 5, "score_threshold": threshold},
    )
    matches = search.invoke(query)
    return len(matches) > 0, matches
chat_col, toggle_col = st.columns([0.9, 0.1])
with chat_col:
prompt = st.chat_input("Ask about your documents..." if st.session_state.rag_enabled else "Ask me anything...")
with toggle_col:
st.session_state.force_web_search = st.toggle('🌐', help="Force web search")
# Check if RAG is enabled
if st.session_state.rag_enabled:
qdrant_client = init_qdrant()
# File/URL Upload Section
st.sidebar.header("📁 Data Upload")
uploaded_file = st.sidebar.file_uploader("Upload PDF", type=["pdf"])
web_url = st.sidebar.text_input("Or enter URL")
# Process documents
if uploaded_file:
file_name = uploaded_file.name
if file_name not in st.session_state.processed_documents:
with st.spinner('Processing PDF...'):
texts = process_pdf(uploaded_file)
if texts and qdrant_client:
if st.session_state.vector_store:
st.session_state.vector_store.add_documents(texts)
else:
st.session_state.vector_store = create_vector_store(qdrant_client, texts)
st.session_state.processed_documents.append(file_name)
st.success(f"✅ Added PDF: {file_name}")
if web_url:
if web_url not in st.session_state.processed_documents:
with st.spinner('Processing URL...'):
texts = process_web(web_url)
if texts and qdrant_client:
if st.session_state.vector_store:
st.session_state.vector_store.add_documents(texts)
else:
st.session_state.vector_store = create_vector_store(qdrant_client, texts)
st.session_state.processed_documents.append(web_url)
st.success(f"✅ Added URL: {web_url}")
# Display sources in sidebar
if st.session_state.processed_documents:
st.sidebar.header("📚 Processed Sources")
for source in st.session_state.processed_documents:
if source.endswith('.pdf'):
st.sidebar.text(f"📄 {source}")
else:
st.sidebar.text(f"🌐 {source}")
if prompt:
# Add user message to history
st.session_state.history.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
if st.session_state.rag_enabled:
# Existing RAG flow remains unchanged
with st.spinner("🤔Evaluating the Query..."):
try:
rewritten_query = prompt
with st.expander("Evaluating the query"):
st.write(f"User's Prompt: {prompt}")
except Exception as e:
st.error(f"❌ Error rewriting query: {str(e)}")
rewritten_query = prompt
# Step 2: Choose search strategy based on force_web_search toggle
context = ""
docs = []
if not st.session_state.force_web_search and st.session_state.vector_store:
# Try document search first
retriever = st.session_state.vector_store.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={
"k": 5,
"score_threshold": st.session_state.similarity_threshold
}
)
docs = retriever.invoke(rewritten_query)
if docs:
context = "\n\n".join([d.page_content for d in docs])
st.info(f"📊 Found {len(docs)} relevant documents (similarity > {st.session_state.similarity_threshold})")
elif st.session_state.use_web_search:
st.info("🔄 No relevant documents found in database, falling back to web search...")
# Step 3: Use web search if:
# 1. Web search is forced ON via toggle, or
# 2. No relevant documents found AND web search is enabled in settings
if (st.session_state.force_web_search or not context) and st.session_state.use_web_search and st.session_state.exa_api_key:
with st.spinner("🔍 Searching the web..."):
try:
web_search_agent = get_web_search_agent()
web_results = web_search_agent.run(rewritten_query).content
if web_results:
context = f"Web Search Results:\n{web_results}"
if st.session_state.force_web_search:
st.info("ℹ️ Using web search as requested via toggle.")
else:
st.info("ℹ️ Using web search as fallback since no relevant documents were found.")
except Exception as e:
st.error(f"❌ Web search error: {str(e)}")
# Step 4: Generate response using the RAG agent
with st.spinner("🤖 Thinking..."):
try:
rag_agent = get_rag_agent()
if context:
full_prompt = f"""Context: {context}
Original Question: {prompt}
Please provide a comprehensive answer based on the available information."""
else:
full_prompt = f"Original Question: {prompt}\n"
st.info("ℹ️ No relevant information found in documents or web search.")
response = rag_agent.run(full_prompt)
# Add assistant response to history
st.session_state.history.append({
"role": "assistant",
"content": response.content
})
# Display assistant response
with st.chat_message("assistant"):
st.write(response.content)
# Show sources if available
if not st.session_state.force_web_search and 'docs' in locals() and docs:
with st.expander("🔍 See document sources"):
for i, doc in enumerate(docs, 1):
source_type = doc.metadata.get("source_type", "unknown")
source_icon = "📄" if source_type == "pdf" else "🌐"
source_name = doc.metadata.get("file_name" if source_type == "pdf" else "url", "unknown")
st.write(f"{source_icon} Source {i} from {source_name}:")
st.write(f"{doc.page_content[:200]}...")
except Exception as e:
st.error(f"❌ Error generating response: {str(e)}")
else:
# Simple mode without RAG
with st.spinner("🤖 Thinking..."):
try:
rag_agent = get_rag_agent()
web_search_agent = get_web_search_agent() if st.session_state.use_web_search else None
# Handle web search if forced or enabled
context = ""
if st.session_state.force_web_search and web_search_agent:
with st.spinner("🔍 Searching the web..."):
try:
web_results = web_search_agent.run(prompt).content
if web_results:
context = f"Web Search Results:\n{web_results}"
st.info("ℹ️ Using web search as requested.")
except Exception as e:
st.error(f"❌ Web search error: {str(e)}")
# Generate response
if context:
full_prompt = f"""Context: {context}
Question: {prompt}
Please provide a comprehensive answer based on the available information."""
else:
full_prompt = prompt
response = rag_agent.run(full_prompt)
response_content = response.content
# Extract thinking process and final response
import re
think_pattern = r'<think>(.*?)</think>'
think_match = re.search(think_pattern, response_content, re.DOTALL)
if think_match:
thinking_process = think_match.group(1).strip()
final_response = re.sub(think_pattern, '', response_content, flags=re.DOTALL).strip()
else:
thinking_process = None
final_response = response_content
# Add assistant response to history (only the final response)
st.session_state.history.append({
"role": "assistant",
"content": final_response
})
# Display assistant response
with st.chat_message("assistant"):
if thinking_process:
with st.expander("🤔 See thinking process"):
st.markdown(thinking_process)
st.markdown(final_response)
except Exception as e:
st.error(f"❌ Error generating response: {str(e)}")
else:
st.warning("You can directly talk to r1 locally! Toggle the RAG mode to upload documents!") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/llama3.1_local_rag/llama3.1_local_rag.py | rag_tutorials/llama3.1_local_rag/llama3.1_local_rag.py | import streamlit as st
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_ollama import OllamaEmbeddings
from langchain_ollama import ChatOllama
st.title("Chat with Webpage 🌐")
st.caption("This app allows you to chat with a webpage using local llama3 and RAG")
# Get the webpage URL from the user
webpage_url = st.text_input("Enter Webpage URL", type="default")
# Connect to Ollama
ollama_endpoint = "http://127.0.0.1:11434"
ollama_model = "llama3.1"
ollama = ChatOllama(model=ollama_model, base_url=ollama_endpoint)
if webpage_url:
# 1. Load the data
loader = WebBaseLoader(webpage_url)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=10)
splits = text_splitter.split_documents(docs)
# 2. Create Ollama embeddings and vector store
embeddings = OllamaEmbeddings(model=ollama_model, base_url=ollama_endpoint)
vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
# 3. Call Ollama Llama3 model
def ollama_llm(question, context):
    """Answer *question* with the local Llama3 model, given retrieved *context*.

    Args:
        question (str): The question to be answered by the model.
        context (str): Supporting text retrieved for the question.

    Returns:
        str: The model's reply, stripped of leading and trailing whitespace.
    """
    prompt = f"Question: {question}\n\nContext: {context}"
    # `ollama` is the module-level ChatOllama client configured above.
    reply = ollama.invoke([('human', prompt)])
    return reply.content.strip()
# 4. RAG Setup
retriever = vectorstore.as_retriever()
def combine_docs(docs):
    """Join the page contents of *docs* into one blank-line-separated string.

    Args:
        docs (list): Document objects exposing a ``page_content`` attribute.

    Returns:
        str: All page contents concatenated, separated by two newlines.
    """
    chunks = [doc.page_content for doc in docs]
    return "\n\n".join(chunks)
def rag_chain(question):
    """Retrieve context for *question* and answer it with the local model.

    Args:
        question (str): The user's query.

    Returns:
        str: Model answer grounded in the retrieved page content.
    """
    hits = retriever.invoke(question)
    context = combine_docs(hits)
    return ollama_llm(question, context)
st.success(f"Loaded {webpage_url} successfully!")
# Ask a question about the webpage
prompt = st.text_input("Ask any question about the webpage")
# Chat with the webpage
if prompt:
result = rag_chain(prompt)
st.write(result)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/local_rag_agent/local_rag_agent.py | rag_tutorials/local_rag_agent/local_rag_agent.py | # Import necessary libraries
from agno.agent import Agent
from agno.models.ollama import Ollama
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.qdrant import Qdrant
from agno.knowledge.embedder.ollama import OllamaEmbedder
from agno.os import AgentOS
# Define the collection name for the vector database
collection_name = "thai-recipe-index"
# Set up Qdrant as the vector database with the embedder
vector_db = Qdrant(
collection=collection_name,
url="http://localhost:6333/",
embedder=OllamaEmbedder()
)
# Define the knowledge base
knowledge_base = Knowledge(
vector_db=vector_db,
)
# Add content to the knowledge base, comment out after the first run to avoid reloading
knowledge_base.add_content(
url="https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"
)
# Create the Agent using Ollama's llama3.2 model and the knowledge base
agent = Agent(
name="Local RAG Agent",
model=Ollama(id="llama3.2"),
knowledge=knowledge_base,
)
# UI for RAG agent
agent_os = AgentOS(agents=[agent])
app = agent_os.get_app()
# Run the AgentOS app
if __name__ == "__main__":
agent_os.serve(app="local_rag_agent:app", reload=True)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/agentic_rag_with_reasoning/rag_reasoning_agent.py | rag_tutorials/agentic_rag_with_reasoning/rag_reasoning_agent.py | import streamlit as st
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.google import Gemini
from agno.tools.reasoning import ReasoningTools
from agno.vectordb.lancedb import LanceDb, SearchType
from dotenv import load_dotenv
import os
# Load environment variables
load_dotenv()
# Page configuration
st.set_page_config(
page_title="Agentic RAG with Reasoning",
page_icon="🧐",
layout="wide"
)
# Main title and description
st.title("🧐 Agentic RAG with Reasoning")
st.markdown("""
This app demonstrates an AI agent that:
1. **Retrieves** relevant information from knowledge sources
2. **Reasons** through the information step-by-step
3. **Answers** your questions with citations
Enter your API keys below to get started!
""")
# API Keys Section
st.subheader("🔑 API Keys")
col1, col2 = st.columns(2)
with col1:
google_key = st.text_input(
"Google API Key",
type="password",
value=os.getenv("GOOGLE_API_KEY", ""),
help="Get your key from https://aistudio.google.com/apikey"
)
with col2:
openai_key = st.text_input(
"OpenAI API Key",
type="password",
value=os.getenv("OPENAI_API_KEY", ""),
help="Get your key from https://platform.openai.com/"
)
# Check if API keys are provided
if google_key and openai_key:
# Initialize URLs in session state
if 'knowledge_urls' not in st.session_state:
st.session_state.knowledge_urls = ["https://www.theunwindai.com/p/mcp-vs-a2a-complementing-or-supplementing"] # Default URL
if 'urls_loaded' not in st.session_state:
st.session_state.urls_loaded = set()
# Initialize knowledge base (cached to avoid reloading)
@st.cache_resource(show_spinner="📚 Loading knowledge base...")
def load_knowledge() -> Knowledge:
"""Load and initialize the knowledge base with vector database"""
kb = Knowledge(
vector_db=LanceDb(
uri="tmp/lancedb",
table_name="agno_docs",
search_type=SearchType.vector, # Use vector search
embedder=OpenAIEmbedder(
api_key=openai_key
),
),
)
return kb
# Initialize agent (cached to avoid reloading)
@st.cache_resource(show_spinner="🤖 Loading agent...")
def load_agent(_kb: Knowledge) -> Agent:
"""Create an agent with reasoning capabilities"""
return Agent(
model=Gemini(
id="gemini-2.5-flash",
api_key=google_key
),
knowledge=_kb,
search_knowledge=True, # Enable knowledge search
tools=[ReasoningTools(add_instructions=True)], # Add reasoning tools
instructions=[
"Include sources in your response.",
"Always search your knowledge before answering the question.",
],
markdown=True, # Enable markdown formatting
)
# Load knowledge and agent
knowledge = load_knowledge()
# Load initial URLs if any (only load once per URL)
for url in st.session_state.knowledge_urls:
if url not in st.session_state.urls_loaded:
knowledge.add_content(url=url)
st.session_state.urls_loaded.add(url)
agent = load_agent(knowledge)
# Sidebar for knowledge management
with st.sidebar:
st.header("📚 Knowledge Sources")
st.markdown("Add URLs to expand the knowledge base:")
# Show current URLs
st.write("**Current sources:**")
for i, url in enumerate(st.session_state.knowledge_urls):
st.text(f"{i+1}. {url}")
# Add new URL
st.divider()
new_url = st.text_input(
"Add new URL",
placeholder="https://www.theunwindai.com/p/mcp-vs-a2a-complementing-or-supplementing",
help="Enter a URL to add to the knowledge base"
)
if st.button("➕ Add URL", type="primary"):
if new_url:
if new_url not in st.session_state.knowledge_urls:
st.session_state.knowledge_urls.append(new_url)
with st.spinner("📥 Loading new documents..."):
if new_url not in st.session_state.urls_loaded:
knowledge.add_content(url=new_url)
st.session_state.urls_loaded.add(new_url)
st.success(f"✅ Added: {new_url}")
st.rerun() # Refresh to show new URL
else:
st.error("Please enter a URL")
# Main query section
st.divider()
st.subheader("🤔 Ask a Question")
# Suggested prompts
st.markdown("**Try these prompts:**")
col1, col2, col3 = st.columns(3)
with col1:
if st.button("What is MCP?", use_container_width=True):
st.session_state.query = "What is MCP (Model Context Protocol) and how does it work?"
with col2:
if st.button("MCP vs A2A", use_container_width=True):
st.session_state.query = "How do MCP and A2A protocols differ, and are they complementary or competing?"
with col3:
if st.button("Agent Communication", use_container_width=True):
st.session_state.query = "How do MCP and A2A work together in AI agent systems for communication and tool access?"
# Query input
query = st.text_area(
"Your question:",
value=st.session_state.get("query", "What is the difference between MCP and A2A protocols?"),
height=100,
help="Ask anything about the loaded knowledge sources"
)
# Run button
if st.button("🚀 Get Answer with Reasoning", type="primary"):
if query:
# Create containers for streaming updates
col1, col2 = st.columns([1, 1])
with col1:
st.markdown("### 🧠 Reasoning Process")
reasoning_container = st.container()
reasoning_placeholder = reasoning_container.empty()
with col2:
st.markdown("### 💡 Answer")
answer_container = st.container()
answer_placeholder = answer_container.empty()
# Variables to accumulate content
citations = []
answer_text = ""
reasoning_text = ""
# Stream the agent's response
with st.spinner("🔍 Searching and reasoning..."):
for chunk in agent.run(
query,
stream=True, # Enable streaming
stream_events=True, # Stream all events including reasoning
):
# Update reasoning display
if hasattr(chunk, 'reasoning_content') and chunk.reasoning_content:
reasoning_text = chunk.reasoning_content
reasoning_placeholder.markdown(
reasoning_text,
unsafe_allow_html=True
)
# Update answer display
if hasattr(chunk, 'content') and chunk.content and isinstance(chunk.content, str):
answer_text += chunk.content
answer_placeholder.markdown(
answer_text,
unsafe_allow_html=True
)
# Collect citations
if hasattr(chunk, 'citations') and chunk.citations:
if hasattr(chunk.citations, 'urls') and chunk.citations.urls:
citations = chunk.citations.urls
# Show citations if available
if citations:
st.divider()
st.subheader("📚 Sources")
for cite in citations:
title = cite.title or cite.url
st.markdown(f"- [{title}]({cite.url})")
else:
st.error("Please enter a question")
else:
# Show instructions if API keys are missing
st.info("""
👋 **Welcome! To use this app, you need:**
1. **Google API Key** - For Gemini AI model
- Sign up at [aistudio.google.com](https://aistudio.google.com/apikey)
2. **OpenAI API Key** - For embeddings
- Sign up at [platform.openai.com](https://platform.openai.com/)
Once you have both keys, enter them above to start!
""")
# Footer with explanation
st.divider()
with st.expander("📖 How This Works"):
st.markdown("""
**This app uses the Agno framework to create an intelligent Q&A system:**
1. **Knowledge Loading**: URLs are processed and stored in a vector database (LanceDB)
2. **Vector Search**: Uses OpenAI's embeddings for semantic search to find relevant information
3. **Reasoning Tools**: The agent uses special tools to think through problems step-by-step
4. **Gemini AI**: Google's Gemini model processes the information and generates answers
**Key Components:**
- `Knowledge`: Manages document loading from URLs
- `LanceDb`: Vector database for efficient similarity search
- `OpenAIEmbedder`: Converts text to embeddings using OpenAI's embedding model
- `ReasoningTools`: Enables step-by-step reasoning
- `Agent`: Orchestrates everything to answer questions
""") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/qwen_local_rag/qwen_local_rag_agent.py | rag_tutorials/qwen_local_rag/qwen_local_rag_agent.py | import os
import tempfile
from datetime import datetime
from typing import List
import streamlit as st
import bs4
from agno.agent import Agent
from agno.models.ollama import Ollama
from langchain_community.document_loaders import PyPDFLoader, WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from langchain_core.embeddings import Embeddings
from agno.tools.exa import ExaTools
from agno.knowledge.embedder.ollama import OllamaEmbedder
class OllamaEmbedderr(Embeddings):
    """Adapter exposing Agno's OllamaEmbedder through LangChain's Embeddings API."""

    def __init__(self, model_name="snowflake-arctic-embed"):
        """
        Initialize the OllamaEmbedderr with a specific model.

        Args:
            model_name (str): The name of the model to use for embedding.
        """
        # 1024 dimensions matches the snowflake-arctic-embed output size.
        self.embedder = OllamaEmbedder(id=model_name, dimensions=1024)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each document text independently, reusing the query path."""
        return list(map(self.embed_query, texts))

    def embed_query(self, text: str) -> List[float]:
        """Return the embedding vector for a single text."""
        return self.embedder.get_embedding(text)
# Constants
COLLECTION_NAME = "test-qwen-r1"
# Streamlit App Initialization
st.title("🐋 Qwen 3 Local RAG Reasoning Agent")
# --- Add Model Info Boxes ---
st.info("**Qwen3:** The latest generation of large language models in Qwen series, offering a comprehensive suite of dense and mixture-of-experts (MoE) models.")
st.info("**Gemma 3:** These models are multimodal—processing text and images—and feature a 128K context window with support for over 140 languages.")
# -------------------------
# Session State Initialization
if 'model_version' not in st.session_state:
st.session_state.model_version = "qwen3:1.7b" # Default to lighter model
if 'vector_store' not in st.session_state:
st.session_state.vector_store = None
if 'processed_documents' not in st.session_state:
st.session_state.processed_documents = []
if 'history' not in st.session_state:
st.session_state.history = []
if 'exa_api_key' not in st.session_state:
st.session_state.exa_api_key = ""
if 'use_web_search' not in st.session_state:
st.session_state.use_web_search = False
if 'force_web_search' not in st.session_state:
st.session_state.force_web_search = False
if 'similarity_threshold' not in st.session_state:
st.session_state.similarity_threshold = 0.7
if 'rag_enabled' not in st.session_state:
st.session_state.rag_enabled = True # RAG is enabled by default
# Sidebar Configuration
st.sidebar.header("⚙️ Settings")
# Model Selection
st.sidebar.header("🧠 Model Choice")
model_help = """
- qwen3:1.7b: Lighter model (MoE)
- gemma3:1b: More capable but requires better GPU/RAM(32k context window)
- gemma3:4b: More capable and MultiModal (Vision)(128k context window)
- deepseek-r1:1.5b
- qwen3:8b: More capable but requires better GPU/RAM
Choose based on your hardware capabilities.
"""
st.session_state.model_version = st.sidebar.radio(
"Select Model Version",
options=["qwen3:1.7b", "gemma3:1b", "gemma3:4b", "deepseek-r1:1.5b", "qwen3:8b"],
help=model_help
)
st.sidebar.info("Run ollama pull qwen3:1.7b")
# RAG Mode Toggle
st.sidebar.header("📚 RAG Mode")
st.session_state.rag_enabled = st.sidebar.toggle("Enable RAG", value=st.session_state.rag_enabled)
# Clear Chat Button
if st.sidebar.button("✨ Clear Chat"):
st.session_state.history = []
st.rerun()
# Show API Configuration only if RAG is enabled
if st.session_state.rag_enabled:
st.sidebar.header("🔬 Search Tuning")
st.session_state.similarity_threshold = st.sidebar.slider(
"Similarity Threshold",
min_value=0.0,
max_value=1.0,
value=0.7,
help="Lower values will return more documents but might be less relevant. Higher values are more strict."
)
# Add in the sidebar configuration section, after the existing API inputs
st.sidebar.header("🌍 Web Search")
st.session_state.use_web_search = st.sidebar.checkbox("Enable Web Search Fallback", value=st.session_state.use_web_search)
if st.session_state.use_web_search:
exa_api_key = st.sidebar.text_input(
"Exa AI API Key",
type="password",
value=st.session_state.exa_api_key,
help="Required for web search fallback when no relevant documents are found"
)
st.session_state.exa_api_key = exa_api_key
# Optional domain filtering
default_domains = ["arxiv.org", "wikipedia.org", "github.com", "medium.com"]
custom_domains = st.sidebar.text_input(
"Custom domains (comma-separated)",
value=",".join(default_domains),
help="Enter domains to search from, e.g.: arxiv.org,wikipedia.org"
)
search_domains = [d.strip() for d in custom_domains.split(",") if d.strip()]
# Utility Functions
def init_qdrant() -> QdrantClient | None:
    """Connect to the local (Docker) Qdrant instance.

    Returns:
        The connected client on success, or None when the connection
        attempt fails (an error banner is shown in the UI in that case).
    """
    try:
        client = QdrantClient(url="http://localhost:6333")
    except Exception as exc:
        st.error(f"🔴 Qdrant connection failed: {str(exc)}")
        return None
    return client
# Document Processing Functions
def process_pdf(file) -> List:
    """Load an uploaded PDF, tag each page with source metadata, and chunk it.

    Args:
        file: Streamlit UploadedFile holding the PDF bytes.

    Returns:
        List of Document chunks (1000 chars, 200 overlap); empty list on
        failure (an error banner is shown in the UI).
    """
    import os  # local import: used only for temp-file cleanup below
    try:
        # PyPDFLoader needs a real path, so spill the upload to a temp file.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
            tmp_file.write(file.getvalue())
            tmp_path = tmp_file.name
        try:
            # Load after the handle is closed (required on Windows).
            loader = PyPDFLoader(tmp_path)
            documents = loader.load()
        finally:
            # Fix: the delete=False temp file was never removed before,
            # leaking one file per upload.
            os.unlink(tmp_path)
        # Add source metadata
        for doc in documents:
            doc.metadata.update({
                "source_type": "pdf",
                "file_name": file.name,
                "timestamp": datetime.now().isoformat()
            })
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        return text_splitter.split_documents(documents)
    except Exception as e:
        st.error(f"📄 PDF processing error: {str(e)}")
        return []
def process_web(url: str) -> List:
    """Scrape a web page, tag its docs with source metadata, and chunk them.

    Args:
        url: The page to fetch.

    Returns:
        List of Document chunks (1000 chars, 200 overlap); empty on failure.
    """
    try:
        # Restrict parsing to the usual main-content containers of blog pages.
        strainer = bs4.SoupStrainer(
            class_=("post-content", "post-title", "post-header", "content", "main")
        )
        loader = WebBaseLoader(web_paths=(url,), bs_kwargs=dict(parse_only=strainer))
        documents = loader.load()
        # Tag every document with where it came from and when.
        for doc in documents:
            doc.metadata["source_type"] = "url"
            doc.metadata["url"] = url
            doc.metadata["timestamp"] = datetime.now().isoformat()
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        return splitter.split_documents(documents)
    except Exception as e:
        st.error(f"🌐 Web processing error: {str(e)}")
        return []
# Vector Store Management
def create_vector_store(client, texts):
    """Ensure the Qdrant collection exists, then upsert the given chunks.

    Args:
        client: Connected QdrantClient.
        texts: Document chunks to embed and store.

    Returns:
        The QdrantVectorStore on success, None on failure.
    """
    try:
        # Create collection if needed; an existing collection is fine.
        try:
            client.create_collection(
                collection_name=COLLECTION_NAME,
                vectors_config=VectorParams(size=1024, distance=Distance.COSINE)
            )
            st.success(f"📚 Created new collection: {COLLECTION_NAME}")
        except Exception as creation_error:
            if "already exists" not in str(creation_error).lower():
                raise creation_error
        vector_store = QdrantVectorStore(
            client=client,
            collection_name=COLLECTION_NAME,
            embedding=OllamaEmbedderr()
        )
        # Embed and upload the chunks.
        with st.spinner('📤 Uploading documents to Qdrant...'):
            vector_store.add_documents(texts)
        st.success("✅ Documents stored successfully!")
        return vector_store
    except Exception as e:
        st.error(f"🔴 Vector store error: {str(e)}")
        return None
def get_web_search_agent() -> Agent:
    """Initialize a web search agent backed by Exa.

    Returns:
        An Agent configured to search the allowed domains and summarize
        results with sources.
    """
    # Fix: `search_domains` is only assigned inside the RAG-enabled sidebar
    # branch, so calling this in non-RAG mode raised NameError. Fall back to
    # the same defaults the sidebar uses.
    domains = globals().get(
        "search_domains",
        ["arxiv.org", "wikipedia.org", "github.com", "medium.com"],
    )
    return Agent(
        name="Web Search Agent",
        model=Ollama(id="llama3.2"),
        tools=[ExaTools(
            api_key=st.session_state.exa_api_key,
            include_domains=domains,
            num_results=5
        )],
        instructions="""You are a web search expert. Your task is to:
1. Search the web for relevant information about the query
2. Compile and summarize the most relevant information
3. Include sources in your response
""",
        debug_mode=True,
        markdown=True,
    )
def get_rag_agent() -> Agent:
    """Build the main answer-generation agent on the user-selected Ollama model."""
    system_instructions = """You are an Intelligent Agent specializing in providing accurate answers.
When asked a question:
- Analyze the question and answer the question with what you know.
When given context from documents:
- Focus on information from the provided documents
- Be precise and cite specific details
When given web search results:
- Clearly indicate that the information comes from web search
- Synthesize the information clearly
Always maintain high accuracy and clarity in your responses.
"""
    return Agent(
        name="Qwen 3 RAG Agent",
        model=Ollama(id=st.session_state.model_version),
        instructions=system_instructions,
        debug_mode=True,
        markdown=True,
    )
def check_document_relevance(query: str, vector_store, threshold: float = 0.7) -> tuple[bool, List]:
    """Check whether the vector store holds documents relevant to `query`.

    Args:
        query: The user question to search for.
        vector_store: The QdrantVectorStore to query (may be None).
        threshold: Minimum similarity score a document must reach.

    Returns:
        (has_results, matching_docs) — at most 5 documents.
    """
    if not vector_store:
        return False, []
    matches = vector_store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 5, "score_threshold": threshold},
    ).invoke(query)
    return len(matches) > 0, matches
# Chat input row with a compact force-web-search toggle beside it.
chat_col, toggle_col = st.columns([0.9, 0.1])
with chat_col:
    prompt = st.chat_input("Ask about your documents..." if st.session_state.rag_enabled else "Ask me anything...")
with toggle_col:
    st.session_state.force_web_search = st.toggle('🌐', help="Force web search")
# Check if RAG is enabled
if st.session_state.rag_enabled:
    qdrant_client = init_qdrant()
    # --- Document Upload Section (Moved to Main Area) ---
    with st.expander("📁 Upload Documents or URLs for RAG", expanded=False):
        if not qdrant_client:
            st.warning("⚠️ Please configure Qdrant API Key and URL in the sidebar to enable document processing.")
        else:
            uploaded_files = st.file_uploader(
                "Upload PDF files",
                accept_multiple_files=True,
                type='pdf'
            )
            url_input = st.text_input("Enter URL to scrape")
            if uploaded_files:
                st.write(f"Processing {len(uploaded_files)} PDF file(s)...")
                all_texts = []
                # Each file is chunked once per session; reprocessing is skipped.
                for file in uploaded_files:
                    if file.name not in st.session_state.processed_documents:
                        with st.spinner(f"Processing {file.name}... "):
                            texts = process_pdf(file)
                            if texts:
                                all_texts.extend(texts)
                                st.session_state.processed_documents.append(file.name)
                    else:
                        st.write(f"📄 {file.name} already processed.")
                if all_texts:
                    with st.spinner("Creating vector store..."):
                        st.session_state.vector_store = create_vector_store(qdrant_client, all_texts)
            if url_input:
                if url_input not in st.session_state.processed_documents:
                    with st.spinner(f"Scraping and processing {url_input}..."):
                        texts = process_web(url_input)
                        if texts:
                            st.session_state.vector_store = create_vector_store(qdrant_client, texts)
                            st.session_state.processed_documents.append(url_input)
                else:
                    st.write(f"🔗 {url_input} already processed.")
            if st.session_state.vector_store:
                st.success("Vector store is ready.")
            elif not uploaded_files and not url_input:
                st.info("Upload PDFs or enter a URL to populate the vector store.")
    # Display sources in sidebar
    if st.session_state.processed_documents:
        st.sidebar.header("📚 Processed Sources")
        for source in st.session_state.processed_documents:
            if source.endswith('.pdf'):
                st.sidebar.text(f"📄 {source}")
            else:
                st.sidebar.text(f"🌐 {source}")
if prompt:
    # Add user message to history
    st.session_state.history.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)
    if st.session_state.rag_enabled:
        # Existing RAG flow remains unchanged
        with st.spinner("🤔Evaluating the Query..."):
            try:
                # No actual rewriting happens here yet — the prompt is used as-is.
                rewritten_query = prompt
                with st.expander("Evaluating the query"):
                    st.write(f"User's Prompt: {prompt}")
            except Exception as e:
                st.error(f"❌ Error rewriting query: {str(e)}")
                rewritten_query = prompt
        # Step 2: Choose search strategy based on force_web_search toggle
        context = ""
        docs = []
        if not st.session_state.force_web_search and st.session_state.vector_store:
            # Try document search first
            retriever = st.session_state.vector_store.as_retriever(
                search_type="similarity_score_threshold",
                search_kwargs={
                    "k": 5,
                    "score_threshold": st.session_state.similarity_threshold
                }
            )
            docs = retriever.invoke(rewritten_query)
            if docs:
                context = "\n\n".join([d.page_content for d in docs])
                st.info(f"📊 Found {len(docs)} relevant documents (similarity > {st.session_state.similarity_threshold})")
            elif st.session_state.use_web_search:
                st.info("🔄 No relevant documents found in database, falling back to web search...")
        # Step 3: Use web search if:
        # 1. Web search is forced ON via toggle, or
        # 2. No relevant documents found AND web search is enabled in settings
        if (st.session_state.force_web_search or not context) and st.session_state.use_web_search and st.session_state.exa_api_key:
            with st.spinner("🔍 Searching the web..."):
                try:
                    web_search_agent = get_web_search_agent()
                    web_results = web_search_agent.run(rewritten_query).content
                    if web_results:
                        context = f"Web Search Results:\n{web_results}"
                        if st.session_state.force_web_search:
                            st.info("ℹ️ Using web search as requested via toggle.")
                        else:
                            st.info("ℹ️ Using web search as fallback since no relevant documents were found.")
                except Exception as e:
                    st.error(f"❌ Web search error: {str(e)}")
        # Step 4: Generate response using the RAG agent
        with st.spinner("🤖 Thinking..."):
            try:
                rag_agent = get_rag_agent()
                if context:
                    full_prompt = f"""Context: {context}
Original Question: {prompt}
Please provide a comprehensive answer based on the available information."""
                else:
                    full_prompt = f"Original Question: {prompt}\n"
                    st.info("ℹ️ No relevant information found in documents or web search.")
                response = rag_agent.run(full_prompt)
                # Add assistant response to history
                st.session_state.history.append({
                    "role": "assistant",
                    "content": response.content
                })
                # Display assistant response
                with st.chat_message("assistant"):
                    st.write(response.content)
                # Show sources if available
                if not st.session_state.force_web_search and 'docs' in locals() and docs:
                    with st.expander("🔍 See document sources"):
                        for i, doc in enumerate(docs, 1):
                            source_type = doc.metadata.get("source_type", "unknown")
                            source_icon = "📄" if source_type == "pdf" else "🌐"
                            source_name = doc.metadata.get("file_name" if source_type == "pdf" else "url", "unknown")
                            st.write(f"{source_icon} Source {i} from {source_name}:")
                            st.write(f"{doc.page_content[:200]}...")
            except Exception as e:
                st.error(f"❌ Error generating response: {str(e)}")
    else:
        # Simple mode without RAG
        with st.spinner("🤖 Thinking..."):
            try:
                rag_agent = get_rag_agent()
                web_search_agent = get_web_search_agent() if st.session_state.use_web_search else None
                # Handle web search if forced or enabled
                context = ""
                if st.session_state.force_web_search and web_search_agent:
                    with st.spinner("🔍 Searching the web..."):
                        try:
                            web_results = web_search_agent.run(prompt).content
                            if web_results:
                                context = f"Web Search Results:\n{web_results}"
                                st.info("ℹ️ Using web search as requested.")
                        except Exception as e:
                            st.error(f"❌ Web search error: {str(e)}")
                # Generate response
                if context:
                    full_prompt = f"""Context: {context}
Question: {prompt}
Please provide a comprehensive answer based on the available information."""
                else:
                    full_prompt = prompt
                response = rag_agent.run(full_prompt)
                response_content = response.content
                # Extract thinking process and final response
                # (reasoning models such as qwen3/deepseek emit <think>...</think>)
                import re
                think_pattern = r'<think>(.*?)</think>'
                think_match = re.search(think_pattern, response_content, re.DOTALL)
                if think_match:
                    thinking_process = think_match.group(1).strip()
                    final_response = re.sub(think_pattern, '', response_content, flags=re.DOTALL).strip()
                else:
                    thinking_process = None
                    final_response = response_content
                # Add assistant response to history (only the final response)
                st.session_state.history.append({
                    "role": "assistant",
                    "content": final_response
                })
                # Display assistant response
                with st.chat_message("assistant"):
                    if thinking_process:
                        with st.expander("🤔 See thinking process"):
                            st.markdown(thinking_process)
                    st.markdown(final_response)
            except Exception as e:
                st.error(f"❌ Error generating response: {str(e)}")
else:
    st.warning("You can directly talk to qwen and gemma models locally! Toggle the RAG mode to upload documents!")
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/rag_agent_cohere/rag_agent_cohere.py | rag_tutorials/rag_agent_cohere/rag_agent_cohere.py | import os
import streamlit as st
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_cohere import CohereEmbeddings, ChatCohere
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
import tempfile
from langgraph.prebuilt import create_react_agent
from langchain_community.tools import DuckDuckGoSearchRun
from typing import TypedDict, List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from time import sleep
from tenacity import retry, wait_exponential, stop_after_attempt
def init_session_state():
    """Seed the session-state keys this app relies on, exactly once per session."""
    defaults = {
        'api_keys_submitted': False,
        'chat_history': [],
        'vectorstore': None,
        'qdrant_api_key': "",
        'qdrant_url': "",
    }
    for key, initial in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = initial
def sidebar_api_form():
    """Render the sidebar credential form and verify the Qdrant connection.

    Returns:
        True when credentials were already verified in this session;
        False otherwise (a successful submit triggers st.rerun(), so the
        True path is taken on the following run). Callers treat the result
        as a boolean gate.
    """
    with st.sidebar:
        st.header("API Credentials")
        if st.session_state.api_keys_submitted:
            st.success("API credentials verified")
            if st.button("Reset Credentials"):
                st.session_state.clear()
                st.rerun()
            return True
        with st.form("api_credentials"):
            cohere_key = st.text_input("Cohere API Key", type="password")
            qdrant_key = st.text_input("Qdrant API Key", type="password", help="Enter your Qdrant API key")
            qdrant_url = st.text_input("Qdrant URL",
                                       placeholder="https://xyz-example.eu-central.aws.cloud.qdrant.io:6333",
                                       help="Enter your Qdrant instance URL")
            if st.form_submit_button("Submit Credentials"):
                try:
                    # Probe the connection before persisting anything.
                    client = QdrantClient(url=qdrant_url, api_key=qdrant_key, timeout=60)
                    client.get_collections()
                    st.session_state.cohere_api_key = cohere_key
                    st.session_state.qdrant_api_key = qdrant_key
                    st.session_state.qdrant_url = qdrant_url
                    st.session_state.api_keys_submitted = True
                    st.success("Credentials verified!")
                    st.rerun()
                except Exception as e:
                    st.error(f"Qdrant connection failed: {str(e)}")
        return False
def init_qdrant() -> QdrantClient:
    """Build a Qdrant client from the verified session credentials.

    Raises:
        ValueError: If either the API key or URL is missing from session state.
    """
    api_key = st.session_state.get("qdrant_api_key")
    url = st.session_state.get("qdrant_url")
    if not api_key:
        raise ValueError("Qdrant API key not provided")
    if not url:
        raise ValueError("Qdrant URL not provided")
    return QdrantClient(url=url, api_key=api_key, timeout=60)
init_session_state()
# Gate the rest of the script until credentials are verified.
if not sidebar_api_form():
    st.info("Please enter your API credentials in the sidebar to continue.")
    st.stop()
# Embeddings and chat model are built fresh each script run with the
# verified Cohere key from session state.
embedding = CohereEmbeddings(model="embed-english-v3.0",
                             cohere_api_key=st.session_state.cohere_api_key)
chat_model = ChatCohere(model="command-r7b-12-2024",
                        temperature=0.1,
                        max_tokens=512,
                        verbose=True,
                        cohere_api_key=st.session_state.cohere_api_key)
client = init_qdrant()
def process_document(file):
    """Split an uploaded PDF into overlapping text chunks.

    Args:
        file: Streamlit UploadedFile with PDF bytes.

    Returns:
        List of Document chunks (1000 chars, 200 overlap); empty on error.
    """
    try:
        # Persist the upload to disk because PyPDFLoader wants a path.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
            tmp_file.write(file.getvalue())
            tmp_path = tmp_file.name
        pages = PyPDFLoader(tmp_path).load()
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        chunks = splitter.split_documents(pages)
        os.unlink(tmp_path)
        return chunks
    except Exception as e:
        st.error(f"Error processing document: {e}")
        return []
# Single Qdrant collection shared by every uploaded document.
COLLECTION_NAME = "cohere_rag"
def create_vector_stores(texts):
    """Ensure the Qdrant collection exists and upsert `texts` into it.

    Returns:
        The populated QdrantVectorStore, or None on failure.
    """
    try:
        # Create the collection; an existing one is not an error.
        try:
            client.create_collection(collection_name=COLLECTION_NAME,
                                     vectors_config=VectorParams(size=1024,
                                                                 distance=Distance.COSINE))
            st.success(f"Created new collection: {COLLECTION_NAME}")
        except Exception as creation_error:
            if "already exists" not in str(creation_error).lower():
                raise creation_error
        store = QdrantVectorStore(client=client,
                                  collection_name=COLLECTION_NAME,
                                  embedding=embedding)
        with st.spinner('Storing documents in Qdrant...'):
            store.add_documents(texts)
        st.success("Documents successfully stored in Qdrant!")
        return store
    except Exception as e:
        st.error(f"Error in vector store creation: {str(e)}")
        return None
# Define the state schema using TypedDict
class AgentState(TypedDict):
    """State schema for the agent."""
    # Conversation so far, oldest first.
    messages: List[HumanMessage | AIMessage | SystemMessage]
    # Recursion flag; process_query seeds it to False in the agent input.
    is_last_step: bool
class RateLimitedDuckDuckGo(DuckDuckGoSearchRun):
    """DuckDuckGo search tool with a fixed pre-request delay and retries.

    Retries up to 3 times with exponential backoff (4–10s) on any failure,
    and additionally retries once after a longer pause when the error text
    contains "Ratelimit".
    """
    @retry(wait=wait_exponential(multiplier=1, min=4, max=10),
           stop=stop_after_attempt(3))
    def run(self, query: str) -> str:
        """Run search with rate limiting."""
        try:
            sleep(2)  # Add delay between requests
            return super().run(query)
        except Exception as e:
            if "Ratelimit" in str(e):
                sleep(5)  # Longer delay on rate limit
                return super().run(query)
            raise e
def create_fallback_agent(chat_model: BaseLanguageModel):
    """Create a LangGraph agent for web research.

    Args:
        chat_model: The chat model driving the react agent.

    Returns:
        A compiled react agent with a single web-search tool.
    """
    def web_research(query: str) -> str:
        """Web search with result formatting."""
        try:
            # Fix: use the rate-limited wrapper defined above (it was dead
            # code) so transient DuckDuckGo "Ratelimit" errors are retried
            # instead of failing the query outright.
            search = RateLimitedDuckDuckGo(num_results=5)
            return search.run(query)
        except Exception as e:
            # Degrade gracefully: the agent can still answer from the model.
            return f"Search failed: {str(e)}. Providing answer based on general knowledge."
    agent = create_react_agent(model=chat_model,
                               tools=[web_research],
                               debug=False)
    return agent
def process_query(vectorstore, query) -> tuple[str, list]:
    """Answer `query` with RAG, falling back to web research when needed.

    Args:
        vectorstore: Populated QdrantVectorStore (from create_vector_stores).
        query: The user's question.

    Returns:
        (answer_text, source_documents) — sources are empty when the answer
        came from web search or an error path.
    """
    try:
        retriever = vectorstore.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={
                "k": 10,
                "score_threshold": 0.7
            }
        )
        # Fix: .invoke() is the current retriever API; get_relevant_documents
        # is deprecated in recent langchain-core releases.
        relevant_docs = retriever.invoke(query)
        if relevant_docs:
            retrieval_qa_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
            combine_docs_chain = create_stuff_documents_chain(chat_model, retrieval_qa_prompt)
            retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
            response = retrieval_chain.invoke({"input": query})
            return response['answer'], relevant_docs

        st.info("No relevant documents found. Searching web...")
        fallback_agent = create_fallback_agent(chat_model)
        with st.spinner('Researching...'):
            agent_input = {
                "messages": [
                    HumanMessage(content=f"""Please thoroughly research the question: '{query}' and provide a detailed and comprehensive response. Make sure to gather the latest information from credible sources. Minimum 400 words.""")
                ],
                "is_last_step": False
            }
            config = {"recursion_limit": 100}
            try:
                response = fallback_agent.invoke(agent_input, config=config)
                if isinstance(response, dict) and "messages" in response:
                    last_message = response["messages"][-1]
                    answer = last_message.content if hasattr(last_message, 'content') else str(last_message)
                    return f"""Web Search Result:
{answer}
""", []
                # Fix: previously fell through here and returned None
                # implicitly, which broke the caller's tuple unpacking.
                return str(response), []
            except Exception:
                fallback_response = chat_model.invoke(f"Please provide a general answer to: {query}").content
                return f"Web search unavailable. General response: {fallback_response}", []
    except Exception as e:
        st.error(f"Error: {str(e)}")
        return "I encountered an error. Please try rephrasing your question.", []
def post_process(answer, sources):
    """Trim/summarize the answer and render numbered source snippets.

    Args:
        answer: Raw answer text from the chain/agent.
        sources: Documents (objects with .page_content) backing the answer.

    Returns:
        (processed_answer, formatted_source_lines)
    """
    answer = answer.strip()
    # Long answers get a model-written 2-3 sentence summary prepended.
    if len(answer) > 500:
        summary = chat_model.invoke(
            f"Summarize the following answer in 2-3 sentences: {answer}"
        ).content
        answer = f"{summary}\n\nFull Answer: {answer}"
    formatted_sources = [
        f"{i}. {source.page_content[:200]}..."
        for i, source in enumerate(sources, 1)
    ]
    return answer, formatted_sources
st.title("RAG Agent with Cohere ⌘R")
uploaded_file = st.file_uploader("Choose a PDF or Image File", type=["pdf", "jpg", "jpeg"])
# Process each upload only once per session (guarded by processed_file).
if uploaded_file is not None and 'processed_file' not in st.session_state:
    with st.spinner('Processing file... This may take a while for images.'):
        texts = process_document(uploaded_file)
        vectorstore = create_vector_stores(texts)
        if vectorstore:
            st.session_state.vectorstore = vectorstore
            st.session_state.processed_file = True
            st.success('File uploaded and processed successfully!')
        else:
            st.error('Failed to process file. Please try again.')
# Replay the conversation so far on every rerun.
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if query := st.chat_input("Ask a question about the document:"):
    st.session_state.chat_history.append({"role": "user", "content": query})
    with st.chat_message("user"):
        st.markdown(query)
    if st.session_state.vectorstore:
        with st.chat_message("assistant"):
            try:
                answer, sources = process_query(st.session_state.vectorstore, query)
                st.markdown(answer)
                if sources:
                    with st.expander("Sources"):
                        for source in sources:
                            st.markdown(f"- {source.page_content[:200]}...")
                st.session_state.chat_history.append({
                    "role": "assistant",
                    "content": answer
                })
            except Exception as e:
                st.error(f"Error: {str(e)}")
                st.info("Please try asking your question again.")
    else:
        st.error("Please upload a document first.")
with st.sidebar:
    st.divider()
    col1, col2 = st.columns(2)
    with col1:
        if st.button('Clear Chat History'):
            st.session_state.chat_history = []
            st.rerun()
    with col2:
        # Drops both the main collection and its compressed companion (if any),
        # then resets the in-session state.
        if st.button('Clear All Data'):
            try:
                collections = client.get_collections().collections
                collection_names = [col.name for col in collections]
                if COLLECTION_NAME in collection_names:
                    client.delete_collection(COLLECTION_NAME)
                if f"{COLLECTION_NAME}_compressed" in collection_names:
                    client.delete_collection(f"{COLLECTION_NAME}_compressed")
                st.session_state.vectorstore = None
                st.session_state.chat_history = []
                st.success("All data cleared successfully!")
                st.rerun()
            except Exception as e:
                st.error(f"Error clearing data: {str(e)}")
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/autonomous_rag/autorag.py | rag_tutorials/autonomous_rag/autorag.py | import streamlit as st
import nest_asyncio
from io import BytesIO
from agno.agent import Agent
from agno.document.reader.pdf_reader import PDFReader
from agno.models.openai import OpenAIChat
from agno.knowledge.pdf_url import PDFUrlKnowledgeBase
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.embedder.openai import OpenAIEmbedder
from agno.vectordb.pgvector import PgVector, SearchType
from agno.storage.agent.postgres import PostgresAgentStorage
# Apply nest_asyncio to allow nested event loops, required for running async functions in Streamlit
nest_asyncio.apply()
# Database connection string for PostgreSQL
# NOTE(review): credentials are hard-coded for the local dev container setup.
DB_URL = "postgresql+psycopg://ai:ai@localhost:5532/ai"
# Function to set up the Assistant, utilizing caching for resource efficiency
@st.cache_resource
def setup_assistant(api_key: str) -> Agent:
    """Initializes and returns an AI Assistant agent with caching for efficiency.

    This function sets up an AI Assistant agent using the OpenAI GPT-4o-mini model
    and configures it with a knowledge base, storage, and web search tools. The
    assistant is designed to first search its knowledge base before querying the
    internet, providing clear and concise answers.

    Args:
        api_key (str): The API key required to access the OpenAI services.

    Returns:
        Agent: An initialized Assistant agent configured with a language model,
        knowledge base, storage, and additional tools for enhanced functionality."""
    llm = OpenAIChat(id="gpt-4o-mini", api_key=api_key)
    # Set up the Assistant with storage, knowledge base, and tools
    return Agent(
        id="auto_rag_agent",  # Name of the Assistant
        model=llm,  # Language model to be used
        # Conversation state persists in Postgres (same DB as the vectors).
        storage=PostgresAgentStorage(table_name="auto_rag_storage", db_url=DB_URL),
        knowledge_base=PDFUrlKnowledgeBase(
            vector_db=PgVector(
                db_url=DB_URL,
                collection="auto_rag_docs",
                embedder=OpenAIEmbedder(id="text-embedding-ada-002", dimensions=1536, api_key=api_key),
            ),
            num_documents=3,  # max documents retrieved per knowledge-base lookup
        ),
        tools=[DuckDuckGoTools()],  # Additional tool for web search via DuckDuckGo
        instructions=[
            "Search your knowledge base first.",
            "If not found, search the internet.",
            "Provide clear and concise answers.",
        ],
        show_tool_calls=True,
        search_knowledge=True,
        markdown=True,
        debug_mode=True,
    )
# Function to add a PDF document to the knowledge base
def add_document(agent: Agent, file: BytesIO):
    """Read a PDF from `file` and upsert its contents into the agent's knowledge base.

    Args:
        agent: The assistant whose knowledge base receives the document.
        file: In-memory PDF bytes.

    Returns:
        None. Shows a success or error banner in the UI.
    """
    docs = PDFReader().read(file)
    if not docs:
        st.error("Failed to read the document.")
        return
    # upsert=True re-indexes pages that were already loaded.
    agent.knowledge_base.load_documents(docs, upsert=True)
    st.success("Document added to the knowledge base.")
# Function to query the Assistant and return a response
def query_assistant(agent: Agent, question: str) -> str:
    """Queries the Assistant and returns a response.

    Args:
        agent (Agent): An instance of the Agent class used to process the query.
        question (str): The question to be asked to the Assistant.

    Returns:
        str: The response generated by the Assistant for the given question."""
    # NOTE(review): this joins whatever agent.run() yields into a plain str,
    # yet main() reads `.content` off the result — one of the two is wrong.
    # Confirm whether agent.run() streams string deltas in this agno version.
    return "".join([delta for delta in agent.run(question)])
# Main function to handle Streamlit app layout and interactions
def main():
    """Render the AutoRAG Streamlit app and wire up its interactions.

    Collects the OpenAI API key, lets the user upload PDFs into the
    knowledge base, and answers submitted questions via the assistant.
    Execution stops (st.stop) until an API key is provided.
    """
    st.set_page_config(page_title="AutoRAG", layout="wide")
    st.title("🤖 Auto-RAG: Autonomous RAG with GPT-4o")
    api_key = st.sidebar.text_input("Enter your OpenAI API Key 🔑", type="password")
    if not api_key:
        st.sidebar.warning("Enter your OpenAI API Key to proceed.")
        st.stop()
    assistant = setup_assistant(api_key)
    uploaded_file = st.sidebar.file_uploader("📄 Upload PDF", type=["pdf"])
    if uploaded_file and st.sidebar.button("🛠️ Add to Knowledge Base"):
        add_document(assistant, BytesIO(uploaded_file.read()))
    question = st.text_input("💬 Ask Your Question:")
    # When the user submits a question, query the assistant for an answer
    if st.button("🔍 Get Answer"):
        # Ensure the question is not empty
        if question.strip():
            with st.spinner("🤔 Thinking..."):
                # Query the assistant and display the response
                answer = query_assistant(assistant, question)
                # Fix: query_assistant is annotated to return str, but this
                # previously accessed `answer.content`, which raises
                # AttributeError on a plain string. Accept either shape.
                st.write("📝 **Response:**", getattr(answer, "content", answer))
        else:
            # Show an error if the question input is empty
            st.error("Please enter a question.")
# Entry point of the application
if __name__ == "__main__":
    main()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/gemini_agentic_rag/agentic_rag_gemini.py | rag_tutorials/gemini_agentic_rag/agentic_rag_gemini.py | import os
import tempfile
from datetime import datetime
from typing import List
import streamlit as st
import google.generativeai as genai
import bs4
from agno.agent import Agent
from agno.models.google import Gemini
from langchain_community.document_loaders import PyPDFLoader, WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from langchain_core.embeddings import Embeddings
from agno.tools.exa import ExaTools
class GeminiEmbedder(Embeddings):
    """LangChain Embeddings adapter over Google's text-embedding API."""

    def __init__(self, model_name="models/text-embedding-004"):
        # Configure the SDK with the key the user entered in the sidebar.
        genai.configure(api_key=st.session_state.google_api_key)
        self.model = model_name

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each document text individually (one API call per text)."""
        return list(map(self.embed_query, texts))

    def embed_query(self, text: str) -> List[float]:
        """Embed a single string for retrieval."""
        result = genai.embed_content(
            model=self.model,
            content=text,
            task_type="retrieval_document"
        )
        return result['embedding']
# Constants
COLLECTION_NAME = "gemini-thinking-agent-agno"
# Streamlit App Initialization
st.title("🤔 Agentic RAG with Gemini Thinking and Agno")
# Session State Initialization
# (each key is seeded once so reruns keep user input and cached objects)
if 'google_api_key' not in st.session_state:
    st.session_state.google_api_key = ""
if 'qdrant_api_key' not in st.session_state:
    st.session_state.qdrant_api_key = ""
if 'qdrant_url' not in st.session_state:
    st.session_state.qdrant_url = ""
if 'vector_store' not in st.session_state:
    st.session_state.vector_store = None
if 'processed_documents' not in st.session_state:
    st.session_state.processed_documents = []
if 'history' not in st.session_state:
    st.session_state.history = []
if 'exa_api_key' not in st.session_state:
    st.session_state.exa_api_key = ""
if 'use_web_search' not in st.session_state:
    st.session_state.use_web_search = False
if 'force_web_search' not in st.session_state:
    st.session_state.force_web_search = False
if 'similarity_threshold' not in st.session_state:
    st.session_state.similarity_threshold = 0.7
# Sidebar Configuration
st.sidebar.header("🔑 API Configuration")
google_api_key = st.sidebar.text_input("Google API Key", type="password", value=st.session_state.google_api_key)
qdrant_api_key = st.sidebar.text_input("Qdrant API Key", type="password", value=st.session_state.qdrant_api_key)
qdrant_url = st.sidebar.text_input("Qdrant URL",
                                   placeholder="https://your-cluster.cloud.qdrant.io:6333",
                                   value=st.session_state.qdrant_url)
# Clear Chat Button
if st.sidebar.button("🗑️ Clear Chat History"):
    st.session_state.history = []
    st.rerun()
# Update session state
st.session_state.google_api_key = google_api_key
st.session_state.qdrant_api_key = qdrant_api_key
st.session_state.qdrant_url = qdrant_url
# Add in the sidebar configuration section, after the existing API inputs
st.sidebar.header("🌐 Web Search Configuration")
st.session_state.use_web_search = st.sidebar.checkbox("Enable Web Search Fallback", value=st.session_state.use_web_search)
if st.session_state.use_web_search:
    exa_api_key = st.sidebar.text_input(
        "Exa AI API Key",
        type="password",
        value=st.session_state.exa_api_key,
        help="Required for web search fallback when no relevant documents are found"
    )
    st.session_state.exa_api_key = exa_api_key
    # Optional domain filtering
    default_domains = ["arxiv.org", "wikipedia.org", "github.com", "medium.com"]
    custom_domains = st.sidebar.text_input(
        "Custom domains (comma-separated)",
        value=",".join(default_domains),
        help="Enter domains to search from, e.g.: arxiv.org,wikipedia.org"
    )
    search_domains = [d.strip() for d in custom_domains.split(",") if d.strip()]
# Add this to the sidebar configuration section
st.sidebar.header("🎯 Search Configuration")
st.session_state.similarity_threshold = st.sidebar.slider(
    "Document Similarity Threshold",
    min_value=0.0,
    max_value=1.0,
    value=0.7,
    help="Lower values will return more documents but might be less relevant. Higher values are more strict."
)
# Utility Functions
def init_qdrant():
    """Initialize a Qdrant client from the credentials in the sidebar.

    Returns:
        A connected QdrantClient, or None when credentials are missing or
        the connection fails (an error banner is shown in the failure case).
    """
    have_credentials = st.session_state.qdrant_api_key and st.session_state.qdrant_url
    if not have_credentials:
        return None
    try:
        return QdrantClient(
            url=st.session_state.qdrant_url,
            api_key=st.session_state.qdrant_api_key,
            timeout=60
        )
    except Exception as e:
        st.error(f"🔴 Qdrant connection failed: {str(e)}")
        return None
# Document Processing Functions
def process_pdf(file) -> List:
    """Load an uploaded PDF, tag each page with source metadata, and chunk it.

    Args:
        file: Streamlit UploadedFile holding the PDF bytes.

    Returns:
        List of Document chunks (1000 chars, 200 overlap); empty list on
        failure (an error banner is shown in the UI).
    """
    try:
        # PyPDFLoader needs a real path, so spill the upload to a temp file.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
            tmp_file.write(file.getvalue())
            tmp_path = tmp_file.name
        try:
            # Load after the handle is closed (required on Windows).
            loader = PyPDFLoader(tmp_path)
            documents = loader.load()
        finally:
            # Fix: the delete=False temp file was never removed before,
            # leaking one file per upload.
            os.unlink(tmp_path)
        # Add source metadata
        for doc in documents:
            doc.metadata.update({
                "source_type": "pdf",
                "file_name": file.name,
                "timestamp": datetime.now().isoformat()
            })
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        return text_splitter.split_documents(documents)
    except Exception as e:
        st.error(f"📄 PDF processing error: {str(e)}")
        return []
def process_web(url: str) -> List:
    """Fetch a web page, tag it with source metadata, and split it into chunks."""
    try:
        # Restrict parsing to the usual content containers to skip nav/boilerplate.
        strainer = bs4.SoupStrainer(
            class_=("post-content", "post-title", "post-header", "content", "main")
        )
        loader = WebBaseLoader(
            web_paths=(url,),
            bs_kwargs=dict(parse_only=strainer)
        )
        pages = loader.load()
        # Tag each document so answers can cite their origin later.
        for page in pages:
            page.metadata.update({
                "source_type": "url",
                "url": url,
                "timestamp": datetime.now().isoformat()
            })
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        return splitter.split_documents(pages)
    except Exception as e:
        st.error(f"🌐 Web processing error: {str(e)}")
        return []
# Vector Store Management
def create_vector_store(client, texts):
"""Create and initialize vector store with documents."""
try:
# Create collection if needed
try:
client.create_collection(
collection_name=COLLECTION_NAME,
vectors_config=VectorParams(
size=768, # Gemini embedding-004 dimension
distance=Distance.COSINE
)
)
st.success(f"📚 Created new collection: {COLLECTION_NAME}")
except Exception as e:
if "already exists" not in str(e).lower():
raise e
# Initialize vector store
vector_store = QdrantVectorStore(
client=client,
collection_name=COLLECTION_NAME,
embedding=GeminiEmbedder()
)
# Add documents
with st.spinner('📤 Uploading documents to Qdrant...'):
vector_store.add_documents(texts)
st.success("✅ Documents stored successfully!")
return vector_store
except Exception as e:
st.error(f"🔴 Vector store error: {str(e)}")
return None
# Add this after the GeminiEmbedder class
def get_query_rewriter_agent() -> Agent:
"""Initialize a query rewriting agent."""
return Agent(
name="Query Rewriter",
model=Gemini(id="gemini-exp-1206"),
instructions="""You are an expert at reformulating questions to be more precise and detailed.
Your task is to:
1. Analyze the user's question
2. Rewrite it to be more specific and search-friendly
3. Expand any acronyms or technical terms
4. Return ONLY the rewritten query without any additional text or explanations
Example 1:
User: "What does it say about ML?"
Output: "What are the key concepts, techniques, and applications of Machine Learning (ML) discussed in the context?"
Example 2:
User: "Tell me about transformers"
Output: "Explain the architecture, mechanisms, and applications of Transformer neural networks in natural language processing and deep learning"
""",
show_tool_calls=False,
markdown=True,
)
def get_web_search_agent() -> Agent:
    """Initialize a web search agent.

    Reads the Exa API key from st.session_state and the module-level
    ``search_domains`` list (set in the sidebar when web search is enabled),
    so it should only be called when web search is configured.
    """
    return Agent(
        name="Web Search Agent",
        model=Gemini(id="gemini-exp-1206"),
        tools=[ExaTools(
            api_key=st.session_state.exa_api_key,
            include_domains=search_domains,
            num_results=5
        )],
        instructions="""You are a web search expert. Your task is to:
1. Search the web for relevant information about the query
2. Compile and summarize the most relevant information
3. Include sources in your response
""",
        show_tool_calls=True,
        markdown=True,
    )
def get_rag_agent() -> Agent:
    """Initialize the main RAG agent.

    This agent receives the retrieved context (documents or web results)
    embedded in its prompt and is instructed to answer from that context.
    """
    return Agent(
        name="Gemini RAG Agent",
        model=Gemini(id="gemini-2.0-flash-thinking-exp-01-21"),
        instructions="""You are an Intelligent Agent specializing in providing accurate answers.
When given context from documents:
- Focus on information from the provided documents
- Be precise and cite specific details
When given web search results:
- Clearly indicate that the information comes from web search
- Synthesize the information clearly
Always maintain high accuracy and clarity in your responses.
""",
        show_tool_calls=True,
        markdown=True,
    )
def check_document_relevance(query: str, vector_store, threshold: float = 0.7) -> tuple[bool, List]:
    """Check whether the vector store holds documents relevant to *query*.

    Args:
        query: The search query.
        vector_store: Store to search in; may be None when nothing is indexed.
        threshold: Minimum similarity score a document must reach.

    Returns:
        tuple[bool, List]: (has_relevant_docs, relevant_docs)
    """
    if not vector_store:
        return False, []
    search_kwargs = {"k": 5, "score_threshold": threshold}
    retriever = vector_store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs=search_kwargs,
    )
    matches = retriever.invoke(query)
    return bool(matches), matches
# Main Application Flow
if st.session_state.google_api_key:
os.environ["GOOGLE_API_KEY"] = st.session_state.google_api_key
genai.configure(api_key=st.session_state.google_api_key)
qdrant_client = init_qdrant()
# File/URL Upload Section
st.sidebar.header("📁 Data Upload")
uploaded_file = st.sidebar.file_uploader("Upload PDF", type=["pdf"])
web_url = st.sidebar.text_input("Or enter URL")
# Process documents
if uploaded_file:
file_name = uploaded_file.name
if file_name not in st.session_state.processed_documents:
with st.spinner('Processing PDF...'):
texts = process_pdf(uploaded_file)
if texts and qdrant_client:
if st.session_state.vector_store:
st.session_state.vector_store.add_documents(texts)
else:
st.session_state.vector_store = create_vector_store(qdrant_client, texts)
st.session_state.processed_documents.append(file_name)
st.success(f"✅ Added PDF: {file_name}")
if web_url:
if web_url not in st.session_state.processed_documents:
with st.spinner('Processing URL...'):
texts = process_web(web_url)
if texts and qdrant_client:
if st.session_state.vector_store:
st.session_state.vector_store.add_documents(texts)
else:
st.session_state.vector_store = create_vector_store(qdrant_client, texts)
st.session_state.processed_documents.append(web_url)
st.success(f"✅ Added URL: {web_url}")
# Display sources in sidebar
if st.session_state.processed_documents:
st.sidebar.header("📚 Processed Sources")
for source in st.session_state.processed_documents:
if source.endswith('.pdf'):
st.sidebar.text(f"📄 {source}")
else:
st.sidebar.text(f"🌐 {source}")
# Chat Interface
# Create two columns for chat input and search toggle
chat_col, toggle_col = st.columns([0.9, 0.1])
with chat_col:
prompt = st.chat_input("Ask about your documents...")
with toggle_col:
st.session_state.force_web_search = st.toggle('🌐', help="Force web search")
if prompt:
# Add user message to history
st.session_state.history.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
# Step 1: Rewrite the query for better retrieval
with st.spinner("🤔 Reformulating query..."):
try:
query_rewriter = get_query_rewriter_agent()
rewritten_query = query_rewriter.run(prompt).content
with st.expander("🔄 See rewritten query"):
st.write(f"Original: {prompt}")
st.write(f"Rewritten: {rewritten_query}")
except Exception as e:
st.error(f"❌ Error rewriting query: {str(e)}")
rewritten_query = prompt
# Step 2: Choose search strategy based on force_web_search toggle
context = ""
docs = []
if not st.session_state.force_web_search and st.session_state.vector_store:
# Try document search first
retriever = st.session_state.vector_store.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={
"k": 5,
"score_threshold": st.session_state.similarity_threshold
}
)
docs = retriever.invoke(rewritten_query)
if docs:
context = "\n\n".join([d.page_content for d in docs])
st.info(f"📊 Found {len(docs)} relevant documents (similarity > {st.session_state.similarity_threshold})")
elif st.session_state.use_web_search:
st.info("🔄 No relevant documents found in database, falling back to web search...")
# Step 3: Use web search if:
# 1. Web search is forced ON via toggle, or
# 2. No relevant documents found AND web search is enabled in settings
if (st.session_state.force_web_search or not context) and st.session_state.use_web_search and st.session_state.exa_api_key:
with st.spinner("🔍 Searching the web..."):
try:
web_search_agent = get_web_search_agent()
web_results = web_search_agent.run(rewritten_query).content
if web_results:
context = f"Web Search Results:\n{web_results}"
if st.session_state.force_web_search:
st.info("ℹ️ Using web search as requested via toggle.")
else:
st.info("ℹ️ Using web search as fallback since no relevant documents were found.")
except Exception as e:
st.error(f"❌ Web search error: {str(e)}")
# Step 4: Generate response using the RAG agent
with st.spinner("🤖 Thinking..."):
try:
rag_agent = get_rag_agent()
if context:
full_prompt = f"""Context: {context}
Original Question: {prompt}
Rewritten Question: {rewritten_query}
Please provide a comprehensive answer based on the available information."""
else:
full_prompt = f"Original Question: {prompt}\nRewritten Question: {rewritten_query}"
st.info("ℹ️ No relevant information found in documents or web search.")
response = rag_agent.run(full_prompt)
# Add assistant response to history
st.session_state.history.append({
"role": "assistant",
"content": response.content
})
# Display assistant response
with st.chat_message("assistant"):
st.write(response.content)
# Show sources if available
if not st.session_state.force_web_search and 'docs' in locals() and docs:
with st.expander("🔍 See document sources"):
for i, doc in enumerate(docs, 1):
source_type = doc.metadata.get("source_type", "unknown")
source_icon = "📄" if source_type == "pdf" else "🌐"
source_name = doc.metadata.get("file_name" if source_type == "pdf" else "url", "unknown")
st.write(f"{source_icon} Source {i} from {source_name}:")
st.write(f"{doc.page_content[:200]}...")
except Exception as e:
st.error(f"❌ Error generating response: {str(e)}")
else:
st.warning("⚠️ Please enter your Google API Key to continue") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/agentic_rag_math_agent/app/benchmark.py | rag_tutorials/agentic_rag_math_agent/app/benchmark.py | # Add the project root to the Python path
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import pandas as pd
import time
from datetime import datetime
from rag.query_router import answer_math_question
from data.load_gsm8k_data import load_jeebench_dataset
def benchmark_math_agent(limit: int = 10):
    """Benchmark the math agent against the JEEBench math subset.

    Args:
        limit: Maximum number of questions to evaluate.

    Returns:
        (df_result, accuracy): per-question results DataFrame and the
        accuracy percentage (0.0 when no questions were evaluated).
    """
    # load_jeebench_dataset() already filters to math-only questions.
    df = load_jeebench_dataset()
    df = df.head(limit)  # Limit the number of questions for benchmarking
    total = len(df)
    correct = 0
    results = []

    for idx, row in df.iterrows():
        question = row["question"]
        expected = row["gold"]
        start = time.time()
        try:
            response = answer_math_question(question)
            # Containment check; str() guards against non-string gold labels
            # which would otherwise crash on .lower().
            is_correct = str(expected).lower() in response.lower()
            if is_correct:
                correct += 1
            results.append({
                "Question": question,
                "Expected": expected,
                "Predicted": response,
                "Correct": is_correct,
                "TimeTakenSec": round(time.time() - start, 2)
            })
        except Exception as e:
            # Record the failure instead of aborting the whole benchmark run.
            results.append({
                "Question": question,
                "Expected": expected,
                "Predicted": f"Error: {e}",
                "Correct": False,
                "TimeTakenSec": None
            })

    df_result = pd.DataFrame(results)
    # Bug fix: avoid ZeroDivisionError when limit == 0 or the dataset is empty.
    accuracy = (correct / total * 100) if total else 0.0
    return df_result, accuracy
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/agentic_rag_math_agent/app/streamlit.py | rag_tutorials/agentic_rag_math_agent/app/streamlit.py | import streamlit as st
import sys
import os
import json
import pandas as pd
# Add root to import path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from app.benchmark import benchmark_math_agent # Add this import
from data.load_gsm8k_data import load_jeebench_dataset
from rag.query_router import answer_math_question
st.set_page_config(page_title="Math Agent 🧮", layout="wide")
st.title("🧠 Math Tutor Agent Dashboard")

tab1, tab2, tab3 = st.tabs(["📘 Ask a Question", "📁 View Feedback", "📊 Benchmark Results"])

# ---------------- TAB 1: Ask a Question ---------------- #
with tab1:
    st.subheader("📘 Ask a Math Question")
    st.markdown("Enter any math question below. The agent will try to explain it step-by-step.")

    # Session defaults so state survives the rerun each widget click triggers.
    for key, default in (
        ("last_question", ""),
        ("last_answer", ""),
        ("feedback_given", False),
        ("feedback", None),
        ("feedback_logged", False),
    ):
        if key not in st.session_state:
            st.session_state[key] = default

    user_question = st.text_input("Your Question:")

    if st.button("Get Answer"):
        if user_question:
            with st.spinner("Thinking..."):
                answer = answer_math_question(user_question)
                st.session_state["last_question"] = user_question
                st.session_state["last_answer"] = answer
                # Reset feedback state for the new answer.
                st.session_state["feedback_given"] = False
                st.session_state["feedback"] = None
                st.session_state["feedback_logged"] = False

    if st.session_state["last_answer"]:
        st.markdown("### ✅ Answer:")
        st.success(st.session_state["last_answer"])

        if not st.session_state["feedback_given"]:
            st.markdown("### 🙋 Was this helpful?")
            col1, col2 = st.columns(2)
            with col1:
                if st.button("👍 Yes"):
                    # Bug fix: the choice must live in session_state; a plain
                    # local `feedback` is undefined on later reruns and raised
                    # NameError when the log block executed.
                    st.session_state["feedback"] = "positive"
                    st.session_state["feedback_given"] = True
            with col2:
                if st.button("👎 No"):
                    st.session_state["feedback"] = "negative"
                    st.session_state["feedback_given"] = True

        # Log exactly once per answer (guarded by feedback_logged) instead of
        # appending a duplicate entry on every subsequent rerun.
        if st.session_state["feedback_given"] and not st.session_state["feedback_logged"]:
            feedback = st.session_state["feedback"]
            log_entry = {
                "question": st.session_state["last_question"],
                "answer": st.session_state["last_answer"],
                "feedback": feedback
            }
            try:
                os.makedirs("logs", exist_ok=True)
                log_file = "logs/feedback_log.json"
                if os.path.exists(log_file):
                    with open(log_file, "r") as f:
                        existing_logs = json.load(f)
                else:
                    existing_logs = []
                existing_logs.append(log_entry)
                with open(log_file, "w") as f:
                    json.dump(existing_logs, f, indent=2)
                st.session_state["feedback_logged"] = True
                st.success(f"✅ Feedback recorded as '{feedback}'")
                st.write("📝 Log entry:", log_entry)
            except Exception as e:
                st.error(f"⚠️ Error saving feedback: {e}")
# ---------------- TAB 2: View Feedback ---------------- #
with tab2:
st.subheader("📁 View Collected Feedback")
try:
with open("logs/feedback_log.json", "r") as f:
feedback_logs = json.load(f)
st.success("Loaded feedback log.")
st.dataframe(pd.DataFrame(feedback_logs))
except Exception as e:
st.warning("No feedback log found or error loading.")
st.text(str(e))
# ---------------- TAB 3: Benchmark Results ---------------- #
with tab3:
st.subheader("📊 Benchmark Accuracy Report")
total_math = len(load_jeebench_dataset())
st.caption(f"📘 Benchmarking from {total_math} math questions")
num_questions = st.slider("Select number of math questions to benchmark", min_value=3, max_value=total_math, value=10)
if st.button("▶️ Run Benchmark Now"):
with st.spinner(f"Benchmarking {num_questions} math questions..."):
df_result, accuracy = benchmark_math_agent(limit=num_questions)
# Save the result
os.makedirs("benchmark", exist_ok=True)
result_path = f"benchmark/results_math_{num_questions}.csv"
df_result.to_csv(result_path, index=False)
# Show result
st.success(f"✅ Done! Accuracy: {accuracy:.2f}%")
st.metric("Accuracy", f"{accuracy:.2f}%")
st.dataframe(df_result)
st.download_button("Download Results", data=df_result.to_csv(index=False), file_name=result_path, mime="text/csv") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/agentic_rag_math_agent/rag/vector.py | rag_tutorials/agentic_rag_math_agent/rag/vector.py | from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core.schema import Document
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from dotenv import load_dotenv
import pandas as pd
import os
# ✅ Load environment variables
load_dotenv("config/.env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# ✅ Load JEEBench dataset as Documents
def load_jeebench_documents():
df = pd.read_json("hf://datasets/daman1209arora/jeebench/test.json")
documents = []
for i, row in df.iterrows():
q = row["question"]
a = row["gold"]
text = f"Q: {q}\nA: {a}"
doc = Document(text=text, metadata={"source": "jee_bench", "index": i})
documents.append(doc)
return documents
# ✅ Build the vector index using Qdrant
def build_vector_index():
documents = load_jeebench_documents()
node_parser = SimpleNodeParser()
nodes = node_parser.get_nodes_from_documents(documents)
qdrant_client = QdrantClient(host="localhost", port=6333)
collection_name = "math_agent"
if not qdrant_client.collection_exists(collection_name=collection_name):
qdrant_client.create_collection(
collection_name=collection_name,
vectors_config=VectorParams(size=1536, distance=Distance.COSINE)
)
vector_store = QdrantVectorStore(client=qdrant_client, collection_name=collection_name)
embed_model = OpenAIEmbedding(api_key=OPENAI_API_KEY)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes, embed_model=embed_model, storage_context=storage_context)
index.storage_context.persist()
print("✅ Qdrant vector index built and saved successfully.")
if __name__ == "__main__":
build_vector_index()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/agentic_rag_math_agent/rag/guardrails.py | rag_tutorials/agentic_rag_math_agent/rag/guardrails.py | import dspy
import os
from dotenv import load_dotenv
# Load API key
load_dotenv("config/.env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
print("🔐 Loaded OPENAI_API_KEY:", "✅ Found" if OPENAI_API_KEY else "❌ Missing")
# Configure LM
lm = dspy.LM(model="gpt-4o", api_key=OPENAI_API_KEY)
dspy.configure(lm=lm)
# ✅ Signature for Input Guard
class ClassifyMath(dspy.Signature):
"""
Decide if a question is related to mathematics — this includes problem-solving,
formulas, definitions (e.g., 'what is calculus'),examples to any topic, or theoretical topics.
Return only 'Yes' or 'No' as your final verdict.
"""
question: str = dspy.InputField()
verdict: str = dspy.OutputField(desc="Respond with 'Yes' if the question is related to mathematics, 'No' otherwise.")
# ✅ Input Validator
class InputValidator(dspy.Module):
def __init__(self):
super().__init__()
self.classifier = dspy.Predict(ClassifyMath)
self.validate_question = dspy.ChainOfThought(
ClassifyMath,
examples=[
{"question": "What is the derivative of x^2?", "verdict": "Yes"},
{"question": "Explain the chain rule in calculus.", "verdict": "Yes"},
{"question": "Why do I need to learn algebra?", "verdict": "Yes"},
{"question": "What is the Pythagorean theorem?", "verdict": "Yes"},
{"question": "How do I solve a quadratic equation?", "verdict": "Yes"},
{"question": "What is the area of a circle?", "verdict": "Yes"},
{"question": "How is math used in real life?", "verdict": "Yes"},
{"question": "What is the purpose of trigonometry?", "verdict": "Yes"},
{"question": "What is the Fibonacci sequence?", "verdict": "Yes"},
{"question": "can you tell me about rhombus?", "verdict": "Yes"},
{"question": "what is a circle?", "verdict": "Yes"},
{"question": "What is the formula for the area of a circle?", "verdict": "Yes"},
{"question": "What is the formula for the circumference of a circle?", "verdict": "Yes"},
{"question": "What is the formula for the volume of a cone?", "verdict": "Yes"},
{"question": "What is the formula for the area of a parallelogram?", "verdict": "Yes"},
{"question": "What is the formula for the area of a trapezoid?", "verdict": "Yes"},
{"question": "What is the formula for the surface area of a cube?", "verdict": "Yes"},
{"question": "What is the area of parallelogram?", "verdict": "Yes"},
{"question": "What is a square?", "verdict": "Yes"},
{"question": "Explain rectangle?", "verdict": "Yes"},
{"question": "can you tell me about pentagon?", "verdict": "Yes"},
{"question": "What is the formula for the volume of a sphere?", "verdict": "Yes"},
{"question": "What is the difference between a mean and median?", "verdict": "Yes"},
{"question": "What is the formula for the area of a triangle?", "verdict": "Yes"},
{"question": "What is the difference between a permutation and a combination?", "verdict": "Yes"},
{"question": "What is the formula for the slope of a line?", "verdict": "Yes"},
{"question": "What is the difference between a rational and irrational number?", "verdict": "Yes"},
{"question": "What is the formula for the area of a rectangle?", "verdict": "Yes"},
{"question": "What is the formula for the volume of a cylinder?", "verdict": "Yes"},
{"question": "What is the formula for the area of a trapezoid?", "verdict": "Yes"},
{"question": "What is the formula for the surface area of a sphere?", "verdict": "Yes"},
{"question": "What is the formula for the surface area of a cylinder?", "verdict": "Yes"},
{"question": "What is the integral of sin(x)?", "verdict": "Yes"},
{"question": "What is the difference between mean and median?", "verdict": "Yes"},
{"question": "What is the formula for the circumference of a circle?", "verdict": "Yes"},
{"question": "What is the quadratic formula?", "verdict": "Yes"},
{"question": "Tell me a good movie to watch.", "verdict": "No"},
{"question": "What is AI?", "verdict": "No"},
]
)
def forward(self, question):
response = self.classifier(question=question)
print("🧠 InputValidator Response:", response.verdict)
return response.verdict.lower().strip() == "yes"
# ✅ Output Validator (no change unless needed)
class OutputValidator(dspy.Module):
class ValidateAnswer(dspy.Signature):
"""Check if the answer is correct, step-by-step, and relevant to the question."""
question = dspy.InputField(desc="The original math question.")
answer = dspy.InputField(desc="The model-generated answer.")
verdict = dspy.OutputField(desc="Answer only 'Yes' or 'No'")
def __init__(self):
super().__init__()
self.validate_answer = dspy.Predict(self.ValidateAnswer)
def forward(self, question, answer):
response = self.validate_answer(
question=question,
answer=answer
)
print("🧠 OutputValidator Response:", response.verdict)
return response.verdict.lower().strip() == "yes"
# Initialize validators
input_validator = InputValidator()
output_validator = OutputValidator()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/agentic_rag_math_agent/rag/query_router.py | rag_tutorials/agentic_rag_math_agent/rag/query_router.py | # rag/query_router.py
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import os
import requests
import openai
import json
import inspect
from llama_index.core import StorageContext,load_index_from_storage
from dotenv import load_dotenv
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from rag.guardrails import OutputValidator, InputValidator
# Load environment variables
load_dotenv("config/.env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
# Load DSPy guardrails
output_validator = OutputValidator()
input_validator = InputValidator()
def load_kb_index():
    """Reload the persisted LlamaIndex backed by the local 'math_agent' Qdrant collection."""
    client = QdrantClient(host="localhost", port=6333)
    store = QdrantVectorStore(client=client, collection_name="math_agent")
    ctx = StorageContext.from_defaults(persist_dir="storage", vector_store=store)
    return load_index_from_storage(ctx)
def query_kb(question: str):
    """Retrieve the single most similar KB entry for *question*.

    Returns:
        (matched_text, similarity): best-matching node text and its score;
        ("I'm not sure.", 0.0) when retrieval yields nothing.
    """
    index = load_kb_index()
    nodes = index.as_retriever(similarity_top_k=1).retrieve(question)
    if not nodes:
        return "I'm not sure.", 0.0
    node = nodes[0]
    matched_text = node.get_text()
    # node.score can be None depending on the retriever; coerce to 0.0.
    similarity = node.score or 0.0
    print(f"🔍 Matched Score: {similarity}")
    print(f"🧠 Matched Content: {matched_text}")
    return matched_text, similarity
def query_web(question: str):
    """Query the Tavily search API and return its synthesized answer string.

    Returns "No answer found." when the response carries no answer field.
    Network or HTTP errors propagate to the caller (which falls back there).
    """
    url = "https://api.tavily.com/search"
    headers = {"Content-Type": "application/json"}
    payload = {
        "api_key": TAVILY_API_KEY,
        "query": question,
        "search_depth": "basic",
        "include_answer": True,
        "include_raw_content": False
    }
    # Bug fix: a request without a timeout can hang the agent indefinitely.
    response = requests.post(url, json=payload, headers=headers, timeout=30)
    data = response.json()
    return data.get("answer", "No answer found.")
def explain_with_openai(question: str, web_content: str):
    """Ask GPT-4o to explain *question*, optionally grounded in web content.

    Args:
        question: The student's original question.
        web_content: Text retrieved from the web; the prompt instructs the
            model to ignore it when irrelevant.

    Returns:
        The model's step-by-step explanation as plain text.
    """
    prompt = f"""
You are a friendly and precise math tutor.
The student asked: "{question}"
Below is some information retrieved from the web. If it's helpful, use it to explain the answer. If it's incorrect or irrelevant, ignore it and instead explain the answer accurately based on your own math knowledge.
Web Content:
\"\"\"
{web_content}
\"\"\"
Now write a clear, accurate, and step-by-step explanation of the student's question.
Only include valid math steps — do not guess or make up answers.
"""
    llm = OpenAI(api_key=OPENAI_API_KEY, model="gpt-4o")
    response = llm.complete(prompt)
    return response.text
def answer_math_question(question: str):
    """Answer a math question via KB retrieval with a web-search fallback.

    Pipeline: input guardrail -> KB lookup -> GPT explanation of the KB
    answer; on low similarity or any error, fall back to Tavily web search.
    The final answer is re-checked by the output guardrail and regenerated
    from the web if it fails.
    """
    print(f"🔍 Query: {question}")
    if not input_validator.forward(question):
        return "⚠️ This assistant only answers math-related academic questions."
    answer = ""
    from_kb = False
    try:
        kb_answer, similarity = query_kb(question)
        print("🧪 KB raw answer:", kb_answer)
        # NOTE(review): `> 0.` accepts ANY positive similarity despite the
        # "High similarity" log below — presumably a stricter threshold
        # (e.g. 0.75) was intended; confirm before changing.
        if similarity > 0.:
            print("✅ High similarity KB match, using GPT for step-by-step explanation...")
            prompt = f"""
You are a helpful math tutor.
Here is a student's question:
\"\"\"
{question}
\"\"\"
And here is the correct answer retrieved from a trusted academic knowledge base:
\"\"\"
{kb_answer}
\"\"\"
Your job is to explain to the student step-by-step **why** this is the correct answer.
Do not change the final answer. You are only allowed to explain what is already given.
Use the KB content as your only source. Do not guess or recalculate.
"""
            llm = OpenAI(api_key=OPENAI_API_KEY, model="gpt-4o")
            answer = llm.complete(prompt).text
            from_kb = True
        else:
            raise ValueError("Low similarity match or empty")
    except Exception as e:
        # Any KB failure (retrieval error or low similarity) routes to the web.
        print("⚠️ Using Web fallback because:", e)
        web_content = query_web(question)
        answer = explain_with_openai(question, web_content)
        from_kb = False
    print(f"📦 Answer Source: {'KB' if from_kb else 'Web'}")
    # Final Output Guardrail Check
    if not output_validator.forward(question, answer):
        print("⚠️ Final answer failed validation — retrying with web content...")
        web_content = query_web(question)
        answer = explain_with_openai(question, web_content)
        from_kb = False
    return answer
if __name__ == "__main__":
    # Smoke test: run one JEE-style question end-to-end from the CLI.
    question = """
In a historical experiment to determine Planck's constant, a metal surface was irradiated with light of different wavelengths.
The emitted photoelectron energies were measured by applying a stopping potential.
The relevant data for the wavelength (λ) of incident light and the corresponding stopping potential (V₀) are given below:
λ (μm) | V₀ (V)
0.3 | 2.0
0.4 | 1.0
0.5 | 0.4
Given that c = 3×10⁸ m/s and e = 1.6×10⁻¹⁹ C, Planck's constant (in Js) found from such an experiment is:
(A) 6.0×10⁻³⁴
(B) 6.4×10⁻³⁴
(C) 6.6×10⁻³⁴
(D) 6.8×10⁻³⁴
"""
    answer = answer_math_question(question)
    print("\n🧠 Final Answer:\n", answer)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/agentic_rag_math_agent/data/load_gsm8k_data.py | rag_tutorials/agentic_rag_math_agent/data/load_gsm8k_data.py | import pandas as pd
def load_jeebench_dataset():
    """Load the JEEBench test split and keep only math questions.

    Returns:
        DataFrame with just the 'question' and 'gold' columns.
    """
    frame = pd.read_json("hf://datasets/daman1209arora/jeebench/test.json")
    math_only = frame[frame["subject"].str.lower() == "math"]
    return math_only[["question", "gold"]]

if __name__ == "__main__":
    load_jeebench_dataset()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/rag_chain/app.py | rag_tutorials/rag_chain/app.py | import os
import streamlit as st
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters.sentence_transformers import SentenceTransformersTokenTextSplitter
from langchain_core.prompts import ChatPromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
# Initialize embedding model
embedding_model = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
# Initialize pharma database
db = Chroma(collection_name="pharma_database",
embedding_function=embedding_model,
persist_directory='./pharma_db')
def format_docs(docs):
    """Join the page content of *docs* into one string.

    Args:
        docs (list): Document objects exposing a ``page_content`` attribute.

    Returns:
        str: All page contents concatenated, separated by blank lines.
    """
    contents = [doc.page_content for doc in docs]
    return "\n\n".join(contents)
def add_to_db(uploaded_files):
    """Processes and adds uploaded PDF files to the database.

    Each upload is written to a temp file (PyPDFLoader needs a path), loaded,
    split into sentence-transformer token chunks, and stored in ``db``.
    Temporary files are removed even when processing fails.

    Args:
        uploaded_files (list): A list of uploaded file objects to be processed.

    Returns:
        None
    """
    # Check if files are uploaded
    if not uploaded_files:
        st.error("No files uploaded!")
        return

    # Hoisted out of the loop: the splitter is configured identically per file.
    st_text_splitter = SentenceTransformersTokenTextSplitter(
        model_name="sentence-transformers/all-mpnet-base-v2",
        chunk_size=100,
        chunk_overlap=50
    )

    for uploaded_file in uploaded_files:
        # Save the uploaded file to a temporary path
        temp_file_path = os.path.join("./temp", uploaded_file.name)
        os.makedirs(os.path.dirname(temp_file_path), exist_ok=True)
        with open(temp_file_path, "wb") as temp_file:
            temp_file.write(uploaded_file.getbuffer())

        try:
            # Load the file using PyPDFLoader
            loader = PyPDFLoader(temp_file_path)
            data = loader.load()

            # Split page contents into smaller chunks, carrying page metadata.
            doc_metadata = [page.metadata for page in data]
            doc_content = [page.page_content for page in data]
            st_chunks = st_text_splitter.create_documents(doc_content, doc_metadata)

            # Add chunks to database
            db.add_documents(st_chunks)
        finally:
            # Bug fix: remove the temp file even when loading/splitting raises,
            # so failed uploads don't accumulate under ./temp.
            os.remove(temp_file_path)
def run_rag_chain(query):
    """Processes a query using a Retrieval-Augmented Generation (RAG) chain.

    Retrieves the top-5 similar chunks from the Chroma store, formats them
    into the prompt, and asks Gemini for a context-grounded answer.

    Args:
        query (str): The user's question that needs to be answered.

    Returns:
        str: A response generated by the chat model, based on the retrieved context.
    """
    # Create a Retriever Object and apply Similarity Search
    retriever = db.as_retriever(search_type="similarity", search_kwargs={'k': 5})

    # Initialize a Chat Prompt Template
    PROMPT_TEMPLATE = """
    You are a highly knowledgeable assistant specializing in pharmaceutical sciences.
    Answer the question based only on the following context:
    {context}
    Answer the question based on the above context:
    {question}
    Use the provided context to answer the user's question accurately and concisely.
    Don't justify your answers.
    Don't give information not mentioned in the CONTEXT INFORMATION.
    Do not say "according to the context" or "mentioned in the context" or similar.
    """
    prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)

    # Initialize a Generator (i.e. Chat Model); the key comes from the sidebar
    # and may be None when the user has not saved one yet.
    chat_model = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro",
        api_key=st.session_state.get("gemini_api_key"),
        temperature=1
    )

    # Initialize a Output Parser
    output_parser = StrOutputParser()

    # RAG Chain: retrieval feeds {context}, the raw query passes through as {question}.
    rag_chain = {"context": retriever | format_docs, "question": RunnablePassthrough()} | prompt_template | chat_model | output_parser

    # Invoke the Chain
    response = rag_chain.invoke(query)
    return response
def main():
    """Initialize and manage the PharmaQuery application interface.
    This function sets up the Streamlit application interface for PharmaQuery,
    a Pharmaceutical Insight Retrieval System. Users can enter queries related
    to the pharmaceutical industry, upload research documents, and manage API
    keys for enhanced functionality.
    The main features include:
    - Query input area for users to ask questions about the pharmaceutical industry.
    - Submission button to process the query and display the retrieved insights.
    - Sidebar for API key input and management.
    - File uploader for adding research documents to the database, enhancing query responses.
    Args:
        None
    Returns:
        None"""
    st.set_page_config(page_title="PharmaQuery", page_icon=":microscope:")
    st.header("Pharmaceutical Insight Retrieval System")
    # Main query box; answered via run_rag_chain on submit.
    query = st.text_area(
        ":bulb: Enter your query about the Pharmaceutical Industry:",
        placeholder="e.g., What are the AI applications in drug discovery?"
    )
    if st.button("Submit"):
        if not query:
            st.warning("Please ask a question")
        else:
            with st.spinner("Thinking..."):
                result = run_rag_chain(query=query)
                st.write(result)
    # Sidebar section 1: Gemini API key capture (stored in session state).
    with st.sidebar:
        st.title("API Keys")
        gemini_api_key = st.text_input("Enter your Gemini API key:", type="password")
        if st.button("Enter"):
            if gemini_api_key:
                st.session_state.gemini_api_key = gemini_api_key
                st.success("API key saved!")
            else:
                st.warning("Please enter your Gemini API key to proceed.")
    # Sidebar section 2: optional PDF uploads ingested into the vector store.
    with st.sidebar:
        st.markdown("---")
        pdf_docs = st.file_uploader("Upload your research documents related to Pharmaceutical Sciences (Optional) :memo:",
                                    type=["pdf"],
                                    accept_multiple_files=True
                                    )
        if st.button("Submit & Process"):
            if not pdf_docs:
                st.warning("Please upload the file")
            else:
                with st.spinner("Processing your documents..."):
                    add_to_db(pdf_docs)
                    st.success(":file_folder: Documents successfully added to the database!")
    # Sidebar Footer
    st.sidebar.write("Built with ❤️ by [Charan](https://www.linkedin.com/in/codewithcharan/)")
if __name__ == "__main__":
main() | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/agentic_rag_embedding_gemma/agentic_rag_embeddinggemma.py | rag_tutorials/agentic_rag_embedding_gemma/agentic_rag_embeddinggemma.py | import streamlit as st
from agno.agent import Agent
from agno.knowledge.embedder.ollama import OllamaEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.ollama import Ollama
from agno.vectordb.lancedb import LanceDb, SearchType
# Page configuration
# Must run before any other Streamlit call in this script.
st.set_page_config(
    page_title="Agentic RAG with Google's EmbeddingGemma",
    page_icon="🔥",
    layout="wide"
)
@st.cache_resource
def load_knowledge_base():
    """Build the LanceDB-backed knowledge base (cached once per process).

    EmbeddingGemma (served by Ollama) produces 768-dim embeddings; LanceDB
    stores them locally under tmp/lancedb and serves vector search.
    """
    vector_store = LanceDb(
        table_name="recipes",
        uri="tmp/lancedb",
        search_type=SearchType.vector,
        embedder=OllamaEmbedder(id="embeddinggemma:latest", dimensions=768),
    )
    return Knowledge(vector_db=vector_store)
# Initialize URLs in session state
if 'urls' not in st.session_state:
    st.session_state.urls = []
if 'urls_loaded' not in st.session_state:
    st.session_state.urls_loaded = set()

kb = load_knowledge_base()

# Load initial URLs if any (only load once per URL)
for url in st.session_state.urls:
    if url not in st.session_state.urls_loaded:
        kb.add_content(url=url)
        st.session_state.urls_loaded.add(url)

# Local Llama 3.2 agent that searches the knowledge base before answering.
agent = Agent(
    model=Ollama(id="llama3.2:latest"),
    knowledge=kb,
    instructions=[
        "Search the knowledge base for relevant information and base your answers on it.",
        "Be clear, and generate well-structured answers.",
        "Use clear headings, bullet points, or numbered lists where appropriate.",
    ],
    search_knowledge=True,
    debug_mode=False,
    markdown=True,
)

# Sidebar for adding knowledge sources
with st.sidebar:
    col1, col2, col3 = st.columns(3)
    with col1:
        st.image("google.png")
    with col2:
        st.image("ollama.png")
    with col3:
        st.image("agno.png")
    st.header("🌐 Add Knowledge Sources")
    new_url = st.text_input(
        "Add URL",
        placeholder="https://example.com/sample.pdf",
        help="Enter a PDF URL to add to the knowledge base",
    )
    if st.button("➕ Add URL", type="primary"):
        if new_url:
            if new_url not in st.session_state.urls:
                st.session_state.urls.append(new_url)
                with st.spinner("📥 Adding new URL..."):
                    kb.add_content(url=new_url)
                    st.session_state.urls_loaded.add(new_url)
                st.success(f"✅ Added: {new_url}")
                st.rerun()
            else:
                st.warning("This URL has already been added.")
        else:
            st.error("Please enter a URL")
    # Display current URLs
    if st.session_state.urls:
        st.subheader("📚 Current Knowledge Sources")
        for i, url in enumerate(st.session_state.urls, 1):
            st.markdown(f"{i}. {url}")

# Main title and description
st.title("🔥 Agentic RAG with EmbeddingGemma (100% local)")
st.markdown(
    """
This app demonstrates an agentic RAG system using local models via [Ollama](https://ollama.com/):
- **EmbeddingGemma** for creating vector embeddings
- **LanceDB** as the local vector database
Add PDF URLs in the sidebar to start and ask questions about the content.
"""
)

query = st.text_input("Enter your question:")

# Simple answer generation (streamed token-by-token into one placeholder)
if st.button("🚀 Get Answer", type="primary"):
    if not query:
        st.error("Please enter a question")
    else:
        st.markdown("### 💡 Answer")
        with st.spinner("🔍 Searching knowledge and generating answer..."):
            try:
                response = ""
                resp_container = st.empty()
                gen = agent.run(query, stream=True)
                for resp_chunk in gen:
                    # Display response
                    if resp_chunk.content is not None:
                        response += resp_chunk.content
                        resp_container.markdown(response)
            except Exception as e:
                st.error(f"Error: {e}")

with st.expander("📖 How This Works"):
    st.markdown(
        """
**This app uses the Agno framework to create an intelligent Q&A system:**
1. **Knowledge Loading**: PDF URLs are processed and stored in LanceDB vector database
2. **EmbeddingGemma as Embedder**: EmbeddingGemma generates local embeddings for semantic search
3. **Llama 3.2**: The Llama 3.2 model generates answers based on retrieved context
**Key Components:**
- `EmbeddingGemma` as the embedder
- `LanceDB` as the vector database
- `Knowledge`: Manages document loading from PDF URLs
- `OllamaEmbedder`: Uses EmbeddingGemma for embeddings
- `Agno Agent`: Orchestrates everything to answer questions
"""
    )
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/rag_database_routing/rag_database_routing.py | rag_tutorials/rag_database_routing/rag_database_routing.py | import os
from typing import List, Dict, Any, Literal, Optional
from dataclasses import dataclass
import streamlit as st
from langchain_core.documents import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import Qdrant
from langchain_openai import OpenAIEmbeddings
from langchain_openai import ChatOpenAI
import tempfile
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from langchain.schema import HumanMessage
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
from langgraph.prebuilt import create_react_agent
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_core.language_models import BaseLanguageModel
from langchain.prompts import ChatPromptTemplate
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
def init_session_state():
    """Seed every session-state key this app relies on (idempotent)."""
    defaults = {
        'openai_api_key': "",
        'qdrant_url': "",
        'qdrant_api_key': "",
        'embeddings': None,
        'llm': None,
        'databases': {},
    }
    for state_key, default_value in defaults.items():
        if state_key not in st.session_state:
            st.session_state[state_key] = default_value
init_session_state()

# Closed set of routing targets; each maps to one Qdrant collection below.
DatabaseType = Literal["products", "support", "finance"]
PERSIST_DIRECTORY = "db_storage"

@dataclass
class CollectionConfig:
    """Static description of one routable document collection."""
    name: str
    description: str
    collection_name: str # This will be used as Qdrant collection name

# Collection configurations
COLLECTIONS: Dict[DatabaseType, CollectionConfig] = {
    "products": CollectionConfig(
        name="Product Information",
        description="Product details, specifications, and features",
        collection_name="products_collection"
    ),
    "support": CollectionConfig(
        name="Customer Support & FAQ",
        description="Customer support information, frequently asked questions, and guides",
        collection_name="support_collection"
    ),
    "finance": CollectionConfig(
        name="Financial Information",
        description="Financial data, revenue, costs, and liabilities",
        collection_name="finance_collection"
    )
}
def initialize_models():
    """Initialize OpenAI models and Qdrant client.

    Reads credentials from session state; on success populates
    st.session_state.embeddings, .llm and .databases (one Qdrant wrapper per
    entry in COLLECTIONS), creating missing collections on the way.
    Returns True on success, False when credentials are missing or the
    Qdrant connection fails.
    """
    if (st.session_state.openai_api_key and
            st.session_state.qdrant_url and
            st.session_state.qdrant_api_key):
        os.environ["OPENAI_API_KEY"] = st.session_state.openai_api_key
        st.session_state.embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
        st.session_state.llm = ChatOpenAI(temperature=0)
        try:
            client = QdrantClient(
                url=st.session_state.qdrant_url,
                api_key=st.session_state.qdrant_api_key
            )
            # Test connection
            client.get_collections()
            # Vector size matching the embedding model configured above.
            vector_size = 1536
            st.session_state.databases = {}
            for db_type, config in COLLECTIONS.items():
                try:
                    client.get_collection(config.collection_name)
                except Exception:
                    # Create collection if it doesn't exist
                    client.create_collection(
                        collection_name=config.collection_name,
                        vectors_config=VectorParams(size=vector_size, distance=Distance.COSINE)
                    )
                st.session_state.databases[db_type] = Qdrant(
                    client=client,
                    collection_name=config.collection_name,
                    embeddings=st.session_state.embeddings
                )
            return True
        except Exception as e:
            st.error(f"Failed to connect to Qdrant: {str(e)}")
            return False
    return False
def process_document(file) -> List[Document]:
    """Process an uploaded PDF into overlapping text chunks.

    Writes the upload to a temporary file (PyPDFLoader needs a path), loads
    it, and splits it into 1000-char chunks with 200-char overlap.

    Args:
        file: A Streamlit UploadedFile containing PDF bytes.

    Returns:
        List[Document]: The split chunks, or [] when processing fails.
    """
    tmp_path = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
            tmp_file.write(file.getvalue())
            tmp_path = tmp_file.name
        loader = PyPDFLoader(tmp_path)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        return text_splitter.split_documents(documents)
    except Exception as e:
        st.error(f"Error processing document: {e}")
        return []
    finally:
        # Bug fix: previously the temp file was only removed on the success
        # path, leaking it whenever load()/split raised. Always clean up.
        if tmp_path:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
def create_routing_agent() -> Agent:
    """Build the agno agent that classifies a question into one collection.

    The agent is constrained to answer with exactly 'products', 'support',
    or 'finance' (or nothing when unsure); callers parse that bare word.
    """
    routing_model = OpenAIChat(
        id="gpt-4o",
        api_key=st.session_state.openai_api_key
    )
    routing_description = """You are a query routing expert. Your only job is to analyze questions and determine which database they should be routed to.
You must respond with exactly one of these three options: 'products', 'support', or 'finance'. The user's question is: {question}"""
    routing_rules = [
        "Follow these rules strictly:",
        "1. For questions about products, features, specifications, or item details, or product manuals → return 'products'",
        "2. For questions about help, guidance, troubleshooting, or customer service, FAQ, or guides → return 'support'",
        "3. For questions about costs, revenue, pricing, or financial data, or financial reports and investments → return 'finance'",
        "4. Return ONLY the database name, no other text or explanation",
        "5. If you're not confident about the routing, return an empty response"
    ]
    return Agent(
        model=routing_model,
        tools=[],
        description=routing_description,
        instructions=routing_rules,
        markdown=False,
        show_tool_calls=False
    )
def route_query(question: str) -> Optional[DatabaseType]:
    """Route query by searching all databases and comparing relevance scores.

    Two stages: (1) average the top-3 similarity scores per collection and
    pick the best one when it clears the 0.5 threshold; (2) otherwise ask
    the LLM routing agent to name a collection.
    Returns None if no suitable database is found.
    """
    try:
        best_score = -1
        best_db_type = None
        all_scores = {}  # Store all scores for debugging
        # Search each database and compare relevance scores
        for db_type, db in st.session_state.databases.items():
            results = db.similarity_search_with_score(
                question,
                k=3
            )
            if results:
                avg_score = sum(score for _, score in results) / len(results)
                all_scores[db_type] = avg_score
                if avg_score > best_score:
                    best_score = avg_score
                    best_db_type = db_type
        # NOTE(review): assumes a higher score means more relevant — verify
        # against the configured Qdrant distance (cosine here).
        confidence_threshold = 0.5
        if best_score >= confidence_threshold and best_db_type:
            st.success(f"Using vector similarity routing: {best_db_type} (confidence: {best_score:.3f})")
            return best_db_type
        st.warning(f"Low confidence scores (below {confidence_threshold}), falling back to LLM routing")
        # Fallback to LLM routing
        routing_agent = create_routing_agent()
        response = routing_agent.run(question)
        # Strip whitespace/backticks/quotes the model may wrap the word in.
        db_type = (response.content
                   .strip()
                   .lower()
                   .translate(str.maketrans('', '', '`\'"')))
        if db_type in COLLECTIONS:
            st.success(f"Using LLM routing decision: {db_type}")
            return db_type
        st.warning("No suitable database found, will use web search fallback")
        return None
    except Exception as e:
        st.error(f"Routing error: {str(e)}")
        return None
def create_fallback_agent(chat_model: BaseLanguageModel):
    """Build a LangGraph ReAct agent whose only tool is DuckDuckGo search."""

    def web_research(query: str) -> str:
        """Web search with result formatting."""
        # NOTE: the docstring above doubles as the tool description shown to
        # the model, so it is kept verbatim.
        try:
            return DuckDuckGoSearchRun(num_results=5).run(query)
        except Exception as e:
            return f"Search failed: {str(e)}. Providing answer based on general knowledge."

    return create_react_agent(model=chat_model,
                              tools=[web_research],
                              debug=False)
def query_database(db: Qdrant, question: str) -> tuple[str, list]:
    """Query the database and return answer and relevant documents.

    Retrieves the top-4 similar chunks, runs a stuff-documents retrieval
    chain over them, and returns (answer, retrieved_docs). Any failure —
    including the no-documents case below — yields a generic apology and [].
    """
    try:
        retriever = db.as_retriever(
            search_type="similarity",
            search_kwargs={"k": 4}
        )
        relevant_docs = retriever.get_relevant_documents(question)
        if relevant_docs:
            # Use simpler chain creation with hub prompt
            retrieval_qa_prompt = ChatPromptTemplate.from_messages([
                ("system", """You are a helpful AI assistant that answers questions based on provided context.
Always be direct and concise in your responses.
If the context doesn't contain enough information to fully answer the question, acknowledge this limitation.
Base your answers strictly on the provided context and avoid making assumptions."""),
                ("human", "Here is the context:\n{context}"),
                ("human", "Question: {input}"),
                ("assistant", "I'll help answer your question based on the context provided."),
                ("human", "Please provide your answer:"),
            ])
            combine_docs_chain = create_stuff_documents_chain(st.session_state.llm, retrieval_qa_prompt)
            retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
            response = retrieval_chain.invoke({"input": question})
            return response['answer'], relevant_docs
        # Deliberately caught by the except below so the empty-result case
        # shares the same error-message path.
        raise ValueError("No relevant documents found in database")
    except Exception as e:
        st.error(f"Error: {str(e)}")
        return "I encountered an error. Please try rephrasing your question.", []
def _handle_web_fallback(question: str) -> tuple[str, list]:
    """Answer *question* via the web-research agent, degrading to the bare LLM.

    Returns:
        tuple[str, list]: (answer text, empty document list) — always a
        2-tuple, since callers unpack it unconditionally.
    """
    st.info("No relevant documents found. Searching web...")
    fallback_agent = create_fallback_agent(st.session_state.llm)
    with st.spinner('Researching...'):
        agent_input = {
            "messages": [
                HumanMessage(content=f"Research and provide a detailed answer for: '{question}'")
            ],
            "is_last_step": False
        }
        try:
            response = fallback_agent.invoke(agent_input, config={"recursion_limit": 100})
            if isinstance(response, dict) and "messages" in response:
                answer = response["messages"][-1].content
                return f"Web Search Result:\n{answer}", []
        except Exception:
            pass
        # Bug fix: the original returned None implicitly when the agent
        # response had an unexpected shape (it only handled the exception
        # path), crashing callers that unpack the tuple. Always fall back
        # to a direct LLM answer instead.
        fallback_response = st.session_state.llm.invoke(question).content
        return f"Web search unavailable. General response: {fallback_response}", []
def main():
    """Main application function.

    Layout: sidebar (credentials + model init), per-collection upload tabs,
    then the question box which routes to a collection or falls back to web
    search.
    """
    st.set_page_config(page_title="RAG Agent with Database Routing", page_icon="📚")
    st.title("📠 RAG Agent with Database Routing")
    # Sidebar for API keys and configuration
    with st.sidebar:
        st.header("Configuration")
        # OpenAI API Key
        api_key = st.text_input(
            "Enter OpenAI API Key:",
            type="password",
            value=st.session_state.openai_api_key,
            key="api_key_input"
        )
        # Qdrant Configuration
        qdrant_url = st.text_input(
            "Enter Qdrant URL:",
            value=st.session_state.qdrant_url,
            help="Example: https://your-cluster.qdrant.tech"
        )
        qdrant_api_key = st.text_input(
            "Enter Qdrant API Key:",
            type="password",
            value=st.session_state.qdrant_api_key
        )
        # Update session state
        if api_key:
            st.session_state.openai_api_key = api_key
        if qdrant_url:
            st.session_state.qdrant_url = qdrant_url
        if qdrant_api_key:
            st.session_state.qdrant_api_key = qdrant_api_key
        # Initialize models if all credentials are provided
        if (st.session_state.openai_api_key and
                st.session_state.qdrant_url and
                st.session_state.qdrant_api_key):
            if initialize_models():
                st.success("Connected to OpenAI and Qdrant successfully!")
            else:
                st.error("Failed to initialize. Please check your credentials.")
        else:
            # st.stop() halts the script here until credentials are supplied.
            st.warning("Please enter all required credentials to continue")
            st.stop()
    st.markdown("---")
    st.header("Document Upload")
    st.info("Upload documents to populate the databases. Each tab corresponds to a different database.")
    tabs = st.tabs([collection_config.name for collection_config in COLLECTIONS.values()])
    for (collection_type, collection_config), tab in zip(COLLECTIONS.items(), tabs):
        with tab:
            st.write(collection_config.description)
            uploaded_files = st.file_uploader(
                f"Upload PDF documents to {collection_config.name}",
                type="pdf",
                key=f"upload_{collection_type}",
                accept_multiple_files=True
            )
            if uploaded_files:
                with st.spinner('Processing documents...'):
                    all_texts = []
                    for uploaded_file in uploaded_files:
                        texts = process_document(uploaded_file)
                        all_texts.extend(texts)
                    if all_texts:
                        db = st.session_state.databases[collection_type]
                        db.add_documents(all_texts)
                        st.success("Documents processed and added to the database!")
    # Query section
    st.header("Ask Questions")
    st.info("Enter your question below to find answers from the relevant database.")
    question = st.text_input("Enter your question:")
    if question:
        with st.spinner('Finding answer...'):
            # Route the question
            collection_type = route_query(question)
            if collection_type is None:
                # Use web search fallback directly
                answer, relevant_docs = _handle_web_fallback(question)
                st.write("### Answer (from web search)")
                st.write(answer)
            else:
                # Display routing information and query the database
                st.info(f"Routing question to: {COLLECTIONS[collection_type].name}")
                db = st.session_state.databases[collection_type]
                answer, relevant_docs = query_database(db, question)
                st.write("### Answer")
                st.write(answer)


if __name__ == "__main__":
    main()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/rag-as-a-service/rag_app.py | rag_tutorials/rag-as-a-service/rag_app.py | import streamlit as st
import requests
from anthropic import Anthropic
import time
from typing import List, Dict, Optional
from urllib.parse import urlparse
class RAGPipeline:
    """RAG helper: Ragie hosts ingestion/retrieval, Claude generates answers."""

    def __init__(self, ragie_api_key: str, anthropic_api_key: str):
        """
        Initialize the RAG pipeline with API keys.
        """
        self.ragie_api_key = ragie_api_key
        self.anthropic_api_key = anthropic_api_key
        self.anthropic_client = Anthropic(api_key=anthropic_api_key)
        # API endpoints
        self.RAGIE_UPLOAD_URL = "https://api.ragie.ai/documents/url"
        self.RAGIE_RETRIEVAL_URL = "https://api.ragie.ai/retrievals"

    def upload_document(self, url: str, name: Optional[str] = None, mode: str = "fast") -> Dict:
        """
        Upload a document to Ragie from a URL.

        When no name is given, the last URL path segment is used (falling
        back to "document"). Raises on any non-2xx response.
        """
        if not name:
            name = urlparse(url).path.split('/')[-1] or "document"
        payload = {
            "mode": mode,
            "name": name,
            "url": url
        }
        headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {self.ragie_api_key}"
        }
        response = requests.post(self.RAGIE_UPLOAD_URL, json=payload, headers=headers)
        if not response.ok:
            raise Exception(f"Document upload failed: {response.status_code} {response.reason}")
        return response.json()

    def retrieve_chunks(self, query: str, scope: str = "tutorial") -> List[str]:
        """
        Retrieve relevant chunks from Ragie for a given query.

        Returns only the chunk texts from the "scored_chunks" field; raises
        on any non-2xx response.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.ragie_api_key}"
        }
        payload = {
            "query": query,
            "filters": {
                "scope": scope
            }
        }
        response = requests.post(
            self.RAGIE_RETRIEVAL_URL,
            headers=headers,
            json=payload
        )
        if not response.ok:
            raise Exception(f"Retrieval failed: {response.status_code} {response.reason}")
        data = response.json()
        return [chunk["text"] for chunk in data["scored_chunks"]]

    def create_system_prompt(self, chunk_texts: List[str]) -> str:
        """
        Create the system prompt with the retrieved chunks.

        Note: the list is interpolated directly, so the model sees a Python
        list repr of the chunk strings.
        """
        return f"""These are very important to follow: You are "Ragie AI", a professional but friendly AI chatbot working as an assistant to the user. Your current task is to help the user based on all of the information available to you shown below. Answer informally, directly, and concisely without a heading or greeting but include everything relevant. Use richtext Markdown when appropriate including bold, italic, paragraphs, and lists when helpful. If using LaTeX, use double $$ as delimiter instead of single $. Use $$...$$ instead of parentheses. Organize information into multiple sections or points when appropriate. Don't include raw item IDs or other raw fields from the source. Don't use XML or other markup unless requested by the user. Here is all of the information available to answer the user: === {chunk_texts} === If the user asked for a search and there are no results, make sure to let the user know that you couldn't find anything, and what they might be able to do to find the information they need. END SYSTEM INSTRUCTIONS"""

    def generate_response(self, system_prompt: str, query: str) -> str:
        """
        Generate response using Claude 4.5 Sonnet.
        """
        message = self.anthropic_client.messages.create(
            model="claude-sonnet-4-5",
            max_tokens=1024,
            system=system_prompt,
            messages=[
                {
                    "role": "user",
                    "content": query
                }
            ]
        )
        return message.content[0].text

    def process_query(self, query: str, scope: str = "tutorial") -> str:
        """
        Process a query through the complete RAG pipeline.
        """
        chunks = self.retrieve_chunks(query, scope)
        if not chunks:
            return "No relevant information found for your query."
        system_prompt = self.create_system_prompt(chunks)
        return self.generate_response(system_prompt, query)
def initialize_session_state():
    """Initialize session state variables (no-op for keys already present)."""
    for state_key, default_value in (
        ('pipeline', None),
        ('document_uploaded', False),
        ('api_keys_submitted', False),
    ):
        if state_key not in st.session_state:
            st.session_state[state_key] = default_value
def main():
    """Streamlit UI: API-key setup, document upload, then querying."""
    st.set_page_config(page_title="RAG-as-a-Service", layout="wide")
    initialize_session_state()
    st.title(":linked_paperclips: RAG-as-a-Service")
    # API Keys Section
    with st.expander("🔑 API Keys Configuration", expanded=not st.session_state.api_keys_submitted):
        col1, col2 = st.columns(2)
        with col1:
            ragie_key = st.text_input("Ragie API Key", type="password", key="ragie_key")
        with col2:
            anthropic_key = st.text_input("Anthropic API Key", type="password", key="anthropic_key")
        if st.button("Submit API Keys"):
            if ragie_key and anthropic_key:
                try:
                    st.session_state.pipeline = RAGPipeline(ragie_key, anthropic_key)
                    st.session_state.api_keys_submitted = True
                    st.success("API keys configured successfully!")
                except Exception as e:
                    st.error(f"Error configuring API keys: {str(e)}")
            else:
                st.error("Please provide both API keys.")
    # Document Upload Section (only once keys are configured)
    if st.session_state.api_keys_submitted:
        st.markdown("### 📄 Document Upload")
        doc_url = st.text_input("Enter document URL")
        doc_name = st.text_input("Document name (optional)")
        col1, col2 = st.columns([1, 3])
        with col1:
            upload_mode = st.selectbox("Upload mode", ["fast", "accurate"])
        if st.button("Upload Document"):
            if doc_url:
                try:
                    with st.spinner("Uploading document..."):
                        st.session_state.pipeline.upload_document(
                            url=doc_url,
                            name=doc_name if doc_name else None,
                            mode=upload_mode
                        )
                        time.sleep(5)  # Wait for indexing
                        st.session_state.document_uploaded = True
                    st.success("Document uploaded and indexed successfully!")
                except Exception as e:
                    st.error(f"Error uploading document: {str(e)}")
            else:
                st.error("Please provide a document URL.")
    # Query Section (only once a document has been uploaded)
    if st.session_state.document_uploaded:
        st.markdown("### 🔍 Query Document")
        query = st.text_input("Enter your query")
        if st.button("Generate Response"):
            if query:
                try:
                    with st.spinner("Generating response..."):
                        response = st.session_state.pipeline.process_query(query)
                    st.markdown("### Response:")
                    st.markdown(response)
                except Exception as e:
                    st.error(f"Error generating response: {str(e)}")
            else:
                st.error("Please enter a query.")
if __name__ == "__main__":
main() | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/contextualai_rag_agent/contextualai_rag_agent.py | rag_tutorials/contextualai_rag_agent/contextualai_rag_agent.py | import os
import tempfile
import time
from typing import List, Optional, Tuple, Any
import streamlit as st
import requests
import json
import re
from contextual import ContextualAI
def init_session_state() -> None:
    """Seed every session-state key the Contextual AI app uses (idempotent)."""
    defaults = {
        "api_key_submitted": False,
        "contextual_api_key": "",
        "base_url": "https://api.contextual.ai/v1",
        "agent_id": "",
        "datastore_id": "",
        "chat_history": [],
        "processed_file": False,
        "last_raw_response": None,
        "last_user_query": "",
    }
    for state_key, default_value in defaults.items():
        if state_key not in st.session_state:
            st.session_state[state_key] = default_value
def sidebar_api_form() -> bool:
    """Render the sidebar credentials form.

    Returns True once credentials have been verified (and offers a reset
    button); otherwise renders the form and returns False for this run —
    a successful submit stores credentials and triggers st.rerun().
    """
    with st.sidebar:
        st.header("API & Resource Setup")
        if st.session_state.api_key_submitted:
            st.success("API verified")
            if st.button("Reset Setup"):
                st.session_state.clear()
                st.rerun()
            return True
        with st.form("contextual_api_form"):
            api_key = st.text_input("Contextual AI API Key", type="password")
            base_url = st.text_input(
                "Base URL",
                value=st.session_state.base_url,
                help="Include /v1 (e.g., https://api.contextual.ai/v1)",
            )
            existing_agent_id = st.text_input("Existing Agent ID (optional)")
            existing_datastore_id = st.text_input("Existing Datastore ID (optional)")
            if st.form_submit_button("Save & Verify"):
                try:
                    # A cheap list call verifies the key/base URL actually work.
                    client = ContextualAI(api_key=api_key, base_url=base_url)
                    _ = client.agents.list()
                    st.session_state.contextual_api_key = api_key
                    st.session_state.base_url = base_url
                    st.session_state.agent_id = existing_agent_id
                    st.session_state.datastore_id = existing_datastore_id
                    st.session_state.api_key_submitted = True
                    st.success("Credentials verified!")
                    st.rerun()
                except Exception as e:
                    st.error(f"Credential verification failed: {str(e)}")
        return False
def ensure_client():
    """Return a ContextualAI client built from session-state credentials.

    Raises:
        ValueError: when no API key has been stored in session state yet.
    """
    stored_key = st.session_state.get("contextual_api_key")
    if not stored_key:
        raise ValueError("Contextual AI API key not provided")
    return ContextualAI(api_key=stored_key, base_url=st.session_state.base_url)
def create_datastore(client, name: str) -> Optional[str]:
    """Create a named datastore; return its id, or None on any failure."""
    try:
        new_datastore = client.datastores.create(name=name)
        return getattr(new_datastore, "id", None)
    except Exception as e:
        st.error(f"Failed to create datastore: {e}")
        return None
ALLOWED_EXTS = {".pdf", ".html", ".htm", ".mhtml", ".doc", ".docx", ".ppt", ".pptx"}
def upload_documents(client, datastore_id: str, files: List[bytes], filenames: List[str], metadata: Optional[dict]) -> List[str]:
    """Ingest raw file bytes into a datastore, one temp file per upload.

    Files with extensions outside ALLOWED_EXTS are skipped with an error
    message; per-file failures are reported and do not stop the batch.

    Returns:
        List[str]: The ingested document ids ("" when the API response has
        no id attribute), in upload order.
    """
    doc_ids: List[str] = []
    for content, fname in zip(files, filenames):
        # Bug fix: reset per iteration. Previously tmp_path kept its value
        # across iterations (or was unbound on the first), so the finally
        # block could unlink a stale path or rely on a swallowed NameError.
        tmp_path = None
        try:
            ext = os.path.splitext(fname)[1].lower()
            if ext not in ALLOWED_EXTS:
                st.error(f"Unsupported file extension for {fname}. Allowed: {sorted(ALLOWED_EXTS)}")
                continue
            with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as tmp:
                tmp.write(content)
                tmp_path = tmp.name
            with open(tmp_path, "rb") as f:
                if metadata:
                    result = client.datastores.documents.ingest(datastore_id, file=f, metadata=metadata)
                else:
                    result = client.datastores.documents.ingest(datastore_id, file=f)
            doc_ids.append(getattr(result, "id", ""))
        except Exception as e:
            st.error(f"Failed to upload {fname}: {e}")
        finally:
            if tmp_path:
                try:
                    os.unlink(tmp_path)
                except OSError:
                    pass
    return doc_ids
def wait_until_documents_ready(api_key: str, datastore_id: str, base_url: str, max_checks: int = 30, interval_sec: float = 5.0) -> None:
    """Poll the datastore until no document is 'processing'/'pending'.

    Gives up silently after max_checks polls; request errors simply wait
    and retry like any other unfinished poll.
    """
    endpoint = f"{base_url.rstrip('/')}/datastores/{datastore_id}/documents"
    auth_headers = {"Authorization": f"Bearer {api_key}"}
    for _ in range(max_checks):
        try:
            reply = requests.get(endpoint, headers=auth_headers, timeout=30)
            if reply.status_code == 200:
                documents = reply.json().get("documents", [])
                still_busy = any(d.get("status") in ("processing", "pending") for d in documents)
                if not still_busy:
                    return
            time.sleep(interval_sec)
        except Exception:
            time.sleep(interval_sec)
def create_agent(client, name: str, description: str, datastore_id: str) -> Optional[str]:
    """Create an agent bound to one datastore; return its id, or None on failure."""
    try:
        new_agent = client.agents.create(name=name, description=description, datastore_ids=[datastore_id])
        return getattr(new_agent, "id", None)
    except Exception as e:
        st.error(f"Failed to create agent: {e}")
        return None
def query_agent(client, agent_id: str, query: str) -> Tuple[str, Any]:
    """Send *query* to an agent and normalize the reply to (text, raw_response).

    The SDK has returned several response shapes over time, so the reply is
    probed in order: .content, .message.content, last of .messages, then
    str(). On any exception the error text is returned with None.
    """
    try:
        raw = client.agents.query.create(agent_id=agent_id, messages=[{"role": "user", "content": query}])
        if hasattr(raw, "content"):
            return raw.content, raw
        if hasattr(raw, "message") and hasattr(raw.message, "content"):
            return raw.message.content, raw
        if hasattr(raw, "messages") and raw.messages:
            final_message = raw.messages[-1]
            return getattr(final_message, "content", str(final_message)), raw
        return str(raw), raw
    except Exception as e:
        return f"Error querying agent: {e}", None
def show_retrieval_info(client, raw_response, agent_id: str) -> None:
    """Render the page image for the query's top retrieval attribution.

    Walks the response metadata defensively: each missing piece produces an
    informational message and aborts; only a complete chain of
    message_id -> content_id -> content_metadatas -> page_img renders.
    """
    try:
        if not raw_response:
            st.info("No retrieval info available.")
            return
        message_id = getattr(raw_response, "message_id", None)
        retrieval_contents = getattr(raw_response, "retrieval_contents", [])
        if not message_id or not retrieval_contents:
            st.info("No retrieval metadata returned.")
            return
        # Only the first (top-ranked) retrieved content is attributed.
        first_content_id = getattr(retrieval_contents[0], "content_id", None)
        if not first_content_id:
            st.info("Missing content_id in retrieval metadata.")
            return
        ret_result = client.agents.query.retrieval_info(message_id=message_id, agent_id=agent_id, content_ids=[first_content_id])
        metadatas = getattr(ret_result, "content_metadatas", [])
        if not metadatas:
            st.info("No content metadatas found.")
            return
        page_img_b64 = getattr(metadatas[0], "page_img", None)
        if not page_img_b64:
            st.info("No page image provided in metadata.")
            return
        import base64
        img_bytes = base64.b64decode(page_img_b64)
        st.image(img_bytes, caption="Top Attribution Page", use_container_width=True)
        # Removed raw object rendering to keep UI clean
    except Exception as e:
        st.error(f"Failed to load retrieval info: {e}")
def update_agent_prompt(client, agent_id: str, system_prompt: str) -> bool:
    """Replace an agent's system prompt; report success as a boolean."""
    try:
        client.agents.update(agent_id=agent_id, system_prompt=system_prompt)
    except Exception as e:
        st.error(f"Failed to update system prompt: {e}")
        return False
    return True
def evaluate_with_lmunit(client, query: str, response_text: str, unit_test: str):
    """Run an LMUnit check against a response and render the raw verdict."""
    try:
        verdict = client.lmunit.create(query=query, response=response_text, unit_test=unit_test)
        st.subheader("Evaluation Result")
        st.code(str(verdict), language="json")
    except Exception as e:
        st.error(f"LMUnit evaluation failed: {e}")
def post_process_answer(text: str) -> str:
    """Tidy an agent answer for display.

    Removes empty parenthesis groups "( )" (any internal whitespace) and
    rewrites "• " bullets as markdown "- " list items on their own line.
    """
    without_empty_parens = re.sub(r"\(\s*\)", "", text)
    return without_empty_parens.replace("• ", "\n- ")
# ---- App entry flow (Streamlit re-runs this top-to-bottom on every interaction) ----
init_session_state()
st.title("Contextual AI RAG Agent")
# Gate the whole app on a valid API key collected in the sidebar.
if not sidebar_api_form():
    st.info("Please enter your Contextual AI API key in the sidebar to continue.")
    st.stop()
client = ensure_client()
# Step 1: create a datastore once; its id persists in session state across reruns.
with st.expander("1) Create or Select Datastore", expanded=True):
    if not st.session_state.datastore_id:
        default_name = "contextualai_rag_datastore"
        ds_name = st.text_input("Datastore Name", value=default_name)
        if st.button("Create Datastore"):
            ds_id = create_datastore(client, ds_name)
            if ds_id:
                st.session_state.datastore_id = ds_id
                st.success(f"Created datastore: {ds_id}")
    else:
        st.success(f"Using Datastore: {st.session_state.datastore_id}")
# Step 2: upload files into the datastore and wait for ingestion to finish.
with st.expander("2) Upload Documents", expanded=True):
    uploaded_files = st.file_uploader("Upload PDFs or text files", type=["pdf", "txt", "md"], accept_multiple_files=True)
    metadata_json = st.text_area("Custom Metadata (JSON)", value="", placeholder='{"custom_metadata": {"field1": "value1"}}')
    if uploaded_files and st.session_state.datastore_id:
        contents = [f.getvalue() for f in uploaded_files]
        names = [f.name for f in uploaded_files]
        if st.button("Ingest Documents"):
            parsed_metadata = None
            if metadata_json.strip():
                try:
                    parsed_metadata = json.loads(metadata_json)
                except Exception as e:
                    st.error(f"Invalid metadata JSON: {e}")
                    parsed_metadata = None  # fall back to ingesting without custom metadata
            ids = upload_documents(client, st.session_state.datastore_id, contents, names, parsed_metadata)
            if ids:
                st.success(f"Uploaded {len(ids)} document(s)")
                # Blocks until the datastore reports the documents as processed.
                wait_until_documents_ready(st.session_state.contextual_api_key, st.session_state.datastore_id, st.session_state.base_url)
                st.info("Documents are ready.")
# Step 3: create an agent bound to the datastore (requires a datastore id).
with st.expander("3) Create or Select Agent", expanded=True):
    if not st.session_state.agent_id and st.session_state.datastore_id:
        agent_name = st.text_input("Agent Name", value="ContextualAI RAG Agent")
        agent_desc = st.text_area("Agent Description", value="RAG agent over uploaded documents")
        if st.button("Create Agent"):
            a_id = create_agent(client, agent_name, agent_desc, st.session_state.datastore_id)
            if a_id:
                st.session_state.agent_id = a_id
                st.success(f"Created agent: {a_id}")
    elif st.session_state.agent_id:
        st.success(f"Using Agent: {st.session_state.agent_id}")
# Step 4: optional system-prompt override for the agent.
with st.expander("4) Agent Settings (Optional)"):
    if st.session_state.agent_id:
        system_prompt_val = st.text_area("System Prompt", value="", placeholder="Paste a new system prompt to update your agent")
        if st.button("Update System Prompt") and system_prompt_val.strip():
            ok = update_agent_prompt(client, st.session_state.agent_id, system_prompt_val.strip())
            if ok:
                st.success("System prompt updated.")
st.divider()
# Replay the stored chat transcript, then handle a newly entered question.
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
query = st.chat_input("Ask a question about your documents")
if query:
    st.session_state.last_user_query = query
    st.session_state.chat_history.append({"role": "user", "content": query})
    with st.chat_message("user"):
        st.markdown(query)
    if st.session_state.agent_id:
        with st.chat_message("assistant"):
            answer, raw = query_agent(client, st.session_state.agent_id, query)
            st.session_state.last_raw_response = raw  # kept for the retrieval-info debug view below
            processed = post_process_answer(answer)
            st.markdown(processed)
            st.session_state.chat_history.append({"role": "assistant", "content": processed})
    else:
        st.error("Please create or select an agent first.")
# Debug tooling: retrieval attributions and LMUnit-based answer evaluation.
with st.expander("Debug & Evaluation", expanded=False):
    st.caption("Tools to inspect retrievals and evaluate answers")
    if st.session_state.agent_id:
        if st.checkbox("Show Retrieval Info", value=False):
            show_retrieval_info(client, st.session_state.last_raw_response, st.session_state.agent_id)
        st.markdown("")
        unit_test = st.text_area("LMUnit rubric / unit test", value="Does the response avoid unnecessary information?", height=80)
        if st.button("Evaluate Last Answer with LMUnit"):
            if st.session_state.last_user_query and st.session_state.chat_history:
                last_assistant_msgs = [m for m in st.session_state.chat_history if m["role"] == "assistant"]
                if last_assistant_msgs:
                    evaluate_with_lmunit(client, st.session_state.last_user_query, last_assistant_msgs[-1]["content"], unit_test)
                else:
                    st.info("No assistant response to evaluate yet.")
            else:
                st.info("Ask a question first to run an evaluation.")
# Sidebar utilities: clear the conversation or wipe all session state.
with st.sidebar:
    st.divider()
    col1, col2 = st.columns(2)
    with col1:
        if st.button("Clear Chat"):
            st.session_state.chat_history = []
            st.session_state.last_raw_response = None
            st.session_state.last_user_query = ""
            st.rerun()
    with col2:
        if st.button("Reset App"):
            st.session_state.clear()
            st.rerun()
# === rag_tutorials/vision_rag/vision_rag.py ===
import requests
import os
import io
import base64
import PIL
from PIL import Image
import tqdm
import numpy as np
import streamlit as st
import cohere
from google import genai
import fitz # PyMuPDF
# --- Streamlit App Configuration ---
st.set_page_config(layout="wide", page_title="Vision RAG with Cohere Embed-4")
st.title("Vision RAG with Cohere Embed-4 🖼️")
# --- API Key Input ---
with st.sidebar:
    st.header("🔑 API Keys")
    cohere_api_key = st.text_input("Cohere API Key", type="password", key="cohere_key")
    google_api_key = st.text_input("Google API Key (Gemini)", type="password", key="google_key")
    # Bare string expressions render as markdown via Streamlit "magic" commands.
    "[Get a Cohere API key](https://dashboard.cohere.com/api-keys)"
    "[Get a Google API key](https://aistudio.google.com/app/apikey)"
    st.markdown("---")
    if not cohere_api_key:
        st.warning("Please enter your Cohere API key to proceed.")
    if not google_api_key:
        st.warning("Please enter your Google API key to proceed.")
    st.markdown("---")
# --- Initialize API Clients ---
co = None
genai_client = None
# Initialize Session State for embeddings and paths (survive Streamlit reruns)
if 'image_paths' not in st.session_state:
    st.session_state.image_paths = []
if 'doc_embeddings' not in st.session_state:
    st.session_state.doc_embeddings = None
# Only attempt client construction when both keys are present.
if cohere_api_key and google_api_key:
    try:
        co = cohere.ClientV2(api_key=cohere_api_key)
        st.sidebar.success("Cohere Client Initialized!")
    except Exception as e:
        st.sidebar.error(f"Cohere Initialization Failed: {e}")
    try:
        genai_client = genai.Client(api_key=google_api_key)
        st.sidebar.success("Gemini Client Initialized!")
    except Exception as e:
        st.sidebar.error(f"Gemini Initialization Failed: {e}")
else:
    st.info("Enter your API keys in the sidebar to start.")
# Information about the models
with st.expander("ℹ️ About the models used"):
    st.markdown("""
    ### Cohere Embed-4
    Cohere's Embed-4 is a state-of-the-art multimodal embedding model designed for enterprise search and retrieval.
    It enables:
    - **Multimodal search**: Search text and images together seamlessly
    - **High accuracy**: State-of-the-art performance for retrieval tasks
    - **Efficient embedding**: Process complex images like charts, graphs, and infographics
    The model processes images without requiring complex OCR pre-processing and maintains the connection between visual elements and text.
    ### Google Gemini 2.5 Flash
    Gemini 2.5 Flash is Google's efficient multimodal model that can process text and image inputs to generate high-quality responses.
    It's designed for fast inference while maintaining high accuracy, making it ideal for real-time applications like this RAG system.
    """)
# --- Helper functions ---
# Some helper functions to resize images and to convert them to base64 format
max_pixels = 1568*1568  # Max resolution (total-pixel budget) for images sent to the embedder
# Resize too large images
def resize_image(pil_image: PIL.Image.Image) -> None:
    """Shrink *pil_image* in place when its pixel count exceeds max_pixels.

    Both dimensions are scaled by the same factor, preserving aspect ratio,
    so the resulting area is at most max_pixels. Images already within the
    budget are left untouched.
    """
    width, height = pil_image.size
    if width * height <= max_pixels:
        return
    scale = (max_pixels / (width * height)) ** 0.5
    pil_image.thumbnail((int(width * scale), int(height * scale)))
# Convert images to a base64 string before sending it to the API
def base64_from_image(img_path: str) -> str:
    """Load the image at *img_path* and return it as a base64 data-URI string.

    The original format is preserved when known (PNG otherwise), and the
    image is downscaled via resize_image() before encoding.
    """
    image = PIL.Image.open(img_path)
    fmt = image.format if image.format else "PNG"
    resize_image(image)
    buffer = io.BytesIO()
    image.save(buffer, format=fmt)
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/{fmt.lower()};base64," + encoded
# Convert PIL image to base64 string
def pil_to_base64(pil_image: PIL.Image.Image) -> str:
    """Return *pil_image* as a base64 data-URI string.

    Falls back to PNG when the image carries no format metadata, and
    downscales the image in place via resize_image() before encoding.
    """
    fmt = "PNG" if pil_image.format is None else pil_image.format
    resize_image(pil_image)
    buffer = io.BytesIO()
    pil_image.save(buffer, format=fmt)
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/{fmt.lower()};base64," + encoded
# Compute embedding for an image
@st.cache_data(ttl=3600, show_spinner=False)
def compute_image_embedding(base64_img: str, _cohere_client) -> np.ndarray | None:
    """Embed a base64-encoded image with Cohere's Embed-4 model.

    Returns a 1-D numpy array on success. Returns None — after surfacing a
    UI warning or error — when the API response carries no embedding or the
    call raises. The leading underscore on _cohere_client keeps Streamlit's
    cache from trying to hash the client object.
    """
    try:
        response = _cohere_client.embed(
            model="embed-v4.0",
            input_type="search_document",
            embedding_types=["float"],
            images=[base64_img],
        )
        if response.embeddings and response.embeddings.float:
            return np.asarray(response.embeddings.float[0])
        st.warning("Could not get embedding. API response might be empty.")
        return None
    except Exception as e:
        st.error(f"Error computing embedding: {e}")
        return None
# Process a PDF file: extract pages as images and embed them
# Note: Caching PDF processing might be complex due to potential large file sizes and streams
# We will process it directly for now, but show progress.
def process_pdf_file(pdf_file, cohere_client, base_output_folder="pdf_pages") -> tuple[list[str], list[np.ndarray] | None]:
    """Extracts pages from a PDF as images, embeds them, and saves them.
    Args:
        pdf_file: UploadedFile object from Streamlit.
        cohere_client: Initialized Cohere client.
        base_output_folder: Directory to save page images.
    Returns:
        A tuple containing:
        - list of paths to the saved page images.
        - list of numpy array embeddings for each page, or None if embedding fails.
    """
    page_image_paths = []
    page_embeddings = []
    pdf_filename = pdf_file.name
    # Each PDF gets its own subfolder named after the file (extension stripped).
    output_folder = os.path.join(base_output_folder, os.path.splitext(pdf_filename)[0])
    os.makedirs(output_folder, exist_ok=True)
    try:
        # Open PDF from stream
        doc = fitz.open(stream=pdf_file.read(), filetype="pdf")
        st.write(f"Processing PDF: {pdf_filename} ({len(doc)} pages)")
        pdf_progress = st.progress(0.0)
        for i, page in enumerate(doc.pages()):
            page_num = i + 1
            page_img_path = os.path.join(output_folder, f"page_{page_num}.png")
            page_image_paths.append(page_img_path)
            # Render page to pixmap (image)
            pix = page.get_pixmap(dpi=150)  # Adjust DPI as needed for quality/performance
            pil_image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
            # Save the page image temporarily
            pil_image.save(page_img_path, "PNG")
            # Convert PIL image to base64
            base64_img = pil_to_base64(pil_image)
            # Compute embedding for the page image
            emb = compute_image_embedding(base64_img, _cohere_client=cohere_client)
            if emb is not None:
                page_embeddings.append(emb)
            else:
                st.warning(f"Could not embed page {page_num} from {pdf_filename}. Skipping.")
                # Add a placeholder to keep lists aligned, will be filtered later
                page_embeddings.append(None)
            # Update progress
            pdf_progress.progress((i + 1) / len(doc))
        doc.close()
        pdf_progress.empty()  # Remove progress bar after completion
        # Filter out pages where embedding failed (paths and embeddings stay index-aligned)
        valid_paths = [path for i, path in enumerate(page_image_paths) if page_embeddings[i] is not None]
        valid_embeddings = [emb for emb in page_embeddings if emb is not None]
        if not valid_embeddings:
            st.error(f"Failed to generate any embeddings for {pdf_filename}.")
            return [], None
        return valid_paths, valid_embeddings
    except Exception as e:
        st.error(f"Error processing PDF {pdf_filename}: {e}")
        return [], None
# Download and embed sample images
@st.cache_data(ttl=3600, show_spinner=False)
def download_and_embed_sample_images(_cohere_client) -> tuple[list[str], np.ndarray | None]:
    """Download the bundled sample chart images and embed them with Embed-4.

    Images that fail to download are skipped entirely; images whose
    embedding fails are dropped from both returned lists so paths and
    embedding rows stay aligned. Returns ([], None) when nothing could be
    embedded.
    """
    # Several images from https://www.appeconomyinsights.com/
    images = {
        "tesla.png": "https://substackcdn.com/image/fetch/w_1456,c_limit,f_webp,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2Fbef936e6-3efa-43b3-88d7-7ec620cdb33b_2744x1539.png",
        "netflix.png": "https://substackcdn.com/image/fetch/w_1456,c_limit,f_webp,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2F23bd84c9-5b62-4526-b467-3088e27e4193_2744x1539.png",
        "nike.png": "https://substackcdn.com/image/fetch/w_1456,c_limit,f_webp,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2Fa5cd33ba-ae1a-42a8-a254-d85e690d9870_2741x1541.png",
        "google.png": "https://substackcdn.com/image/fetch/f_auto,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2F395dd3b9-b38e-4d1f-91bc-d37b642ee920_2741x1541.png",
        "accenture.png": "https://substackcdn.com/image/fetch/w_1456,c_limit,f_webp,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2F08b2227c-7dc8-49f7-b3c5-13cab5443ba6_2741x1541.png",
        "tecent.png": "https://substackcdn.com/image/fetch/w_1456,c_limit,f_webp,q_auto:good,fl_progressive:steep/https%3A%2F%2Fsubstack-post-media.s3.amazonaws.com%2Fpublic%2Fimages%2F0ec8448c-c4d1-4aab-a8e9-2ddebe0c95fd_2741x1541.png"
    }
    # Prepare folders
    img_folder = "img"
    os.makedirs(img_folder, exist_ok=True)
    paths: list[str] = []
    embeddings: list = []
    with st.spinner("Downloading and embedding sample images..."):
        for name, url in tqdm.tqdm(images.items(), desc="Processing sample images"):
            img_path = os.path.join(img_folder, name)
            # Fetch the file only when it is not already cached on disk.
            if not os.path.exists(img_path):
                try:
                    response = requests.get(url)
                    response.raise_for_status()
                    with open(img_path, "wb") as fOut:
                        fOut.write(response.content)
                except requests.exceptions.RequestException as e:
                    st.error(f"Failed to download {name}: {e}")
                    continue  # skip this image entirely
            paths.append(img_path)
            # Embed; record None on failure so paths/embeddings stay aligned.
            try:
                base64_img = base64_from_image(img_path)
                embeddings.append(
                    compute_image_embedding(base64_img, _cohere_client=_cohere_client)
                )
            except Exception as e:
                st.error(f"Failed to embed {name}: {e}")
                embeddings.append(None)
    # Drop entries whose embedding failed, keeping index alignment.
    kept_paths = [p for p, e in zip(paths, embeddings) if e is not None]
    kept_embeddings = [e for e in embeddings if e is not None]
    if kept_embeddings:
        return kept_paths, np.vstack(kept_embeddings)
    return [], None
# Search function
def search(question: str, co_client: cohere.Client, embeddings: np.ndarray, image_paths: list[str], max_img_size: int = 800) -> str | None:
    """Return the path of the loaded image most relevant to *question*.

    Embeds the query with Cohere Embed-4 and scores it against the document
    embeddings by dot product (treated as cosine similarity by this app).
    Returns None — after a UI warning/error — on any validation or API
    failure.
    """
    if not co_client or embeddings is None or embeddings.size == 0 or not image_paths:
        st.warning("Search prerequisites not met (client, embeddings, or paths missing/empty).")
        return None
    if embeddings.shape[0] != len(image_paths):
        st.error(f"Mismatch between embeddings count ({embeddings.shape[0]}) and image paths count ({len(image_paths)}). Cannot perform search.")
        return None
    try:
        # Compute the embedding for the query
        api_response = co_client.embed(
            model="embed-v4.0",
            input_type="search_query",
            embedding_types=["float"],
            texts=[question],
        )
        if not (api_response.embeddings and api_response.embeddings.float):
            st.error("Failed to get query embedding.")
            return None
        query_vec = np.asarray(api_response.embeddings.float[0])
        # Dimensions must agree for the similarity product below.
        if query_vec.shape[0] != embeddings.shape[1]:
            st.error(f"Query embedding dimension ({query_vec.shape[0]}) does not match document embedding dimension ({embeddings.shape[1]}).")
            return None
        scores = embeddings @ query_vec
        best_idx = int(np.argmax(scores))
        hit_img_path = image_paths[best_idx]
        print(f"Question: {question}")  # Keep for debugging
        print(f"Most relevant image: {hit_img_path}")  # Keep for debugging
        return hit_img_path
    except Exception as e:
        st.error(f"Error during search: {e}")
        return None
# Answer function
def answer(question: str, img_path: str, gemini_client) -> str:
    """Answers the question based on the provided image using Gemini.

    Returns the model's text answer, or a human-readable error string when
    prerequisites are missing or generation fails (this function never raises).
    """
    if not gemini_client or not img_path or not os.path.exists(img_path):
        # Build a precise message listing exactly which prerequisite failed.
        missing = []
        if not gemini_client: missing.append("Gemini client")
        if not img_path: missing.append("Image path")
        elif not os.path.exists(img_path): missing.append(f"Image file at {img_path}")
        return f"Answering prerequisites not met ({', '.join(missing)} missing or invalid)."
    try:
        img = PIL.Image.open(img_path)
        # Multimodal prompt: instruction text plus the retrieved image object.
        prompt = [f"""Answer the question based on the following image. Be as elaborate as possible giving extra relevant information.
Don't use markdown formatting in the response.
Please provide enough context for your answer.
Question: {question}""", img]
        response = gemini_client.models.generate_content(
            model="gemini-2.5-flash",
            contents=prompt
        )
        llm_answer = response.text
        print("LLM Answer:", llm_answer)  # Keep for debugging
        return llm_answer
    except Exception as e:
        st.error(f"Error during answer generation: {e}")
        return f"Failed to generate answer: {e}"
# --- Main UI Setup ---
# Section: load the bundled sample chart images into session state on demand.
st.subheader("📊 Load Sample Images")
if cohere_api_key and co:
    # If button clicked, load sample images into session state
    if st.button("Load Sample Images", key="load_sample_button"):
        sample_img_paths, sample_doc_embeddings = download_and_embed_sample_images(_cohere_client=co)
        if sample_img_paths and sample_doc_embeddings is not None:
            # Append sample images to session state (avoid duplicates if clicked again)
            current_paths = set(st.session_state.image_paths)
            new_paths = [p for p in sample_img_paths if p not in current_paths]
            if new_paths:
                st.session_state.image_paths.extend(new_paths)
                # Select only the embedding rows matching the newly added paths
                # (fixed: a previously computed `new_indices` list was never used).
                new_embeddings_to_add = sample_doc_embeddings[[idx for idx, p in enumerate(sample_img_paths) if p in new_paths]]
                if st.session_state.doc_embeddings is None or st.session_state.doc_embeddings.size == 0:
                    st.session_state.doc_embeddings = new_embeddings_to_add
                else:
                    st.session_state.doc_embeddings = np.vstack((st.session_state.doc_embeddings, new_embeddings_to_add))
                st.success(f"Loaded {len(new_paths)} sample images.")
            else:
                st.info("Sample images already loaded.")
        else:
            st.error("Failed to load sample images. Check console for errors.")
else:
    st.warning("Enter API keys to enable loading sample images.")
st.markdown("--- ")
# --- File Uploader (Main UI) ---
st.subheader("📤 Upload Your Images")
st.info("Or, upload your own images or PDFs. The RAG process will search across all loaded content.")
# File uploader
uploaded_files = st.file_uploader("Upload images (PNG, JPG, JPEG) or PDFs",
                                  type=["png", "jpg", "jpeg", "pdf"],
                                  accept_multiple_files=True, key="image_uploader",
                                  label_visibility="collapsed")
# Process uploaded images
if uploaded_files and co:
    st.write(f"Processing {len(uploaded_files)} uploaded images...")
    progress_bar = st.progress(0)
    # Create a temporary directory for uploaded images
    upload_folder = "uploaded_img"
    os.makedirs(upload_folder, exist_ok=True)
    newly_uploaded_paths = []
    newly_uploaded_embeddings = []
    for i, uploaded_file in enumerate(uploaded_files):
        # Check if already processed this session (simple name check)
        img_path = os.path.join(upload_folder, uploaded_file.name)
        if img_path not in st.session_state.image_paths:
            try:
                # Check file type
                file_type = uploaded_file.type
                if file_type == "application/pdf":
                    # Process PDF - returns list of paths and list of embeddings
                    pdf_page_paths, pdf_page_embeddings = process_pdf_file(uploaded_file, cohere_client=co)
                    if pdf_page_paths and pdf_page_embeddings:
                        # Add only paths/embeddings not already in session state
                        current_paths_set = set(st.session_state.image_paths)
                        unique_new_paths = [p for p in pdf_page_paths if p not in current_paths_set]
                        if unique_new_paths:
                            # NOTE: the comprehension's `i` is scoped to the comprehension
                            # (Python 3), so the outer file index used for progress is safe.
                            indices_to_add = [i for i, p in enumerate(pdf_page_paths) if p in unique_new_paths]
                            newly_uploaded_paths.extend(unique_new_paths)
                            newly_uploaded_embeddings.extend([pdf_page_embeddings[idx] for idx in indices_to_add])
                elif file_type in ["image/png", "image/jpeg"]:
                    # Process regular image
                    # Save the uploaded file
                    with open(img_path, "wb") as f:
                        f.write(uploaded_file.getbuffer())
                    # Get embedding
                    base64_img = base64_from_image(img_path)
                    emb = compute_image_embedding(base64_img, _cohere_client=co)
                    if emb is not None:
                        newly_uploaded_paths.append(img_path)
                        newly_uploaded_embeddings.append(emb)
                else:
                    st.warning(f"Unsupported file type skipped: {uploaded_file.name} ({file_type})")
            except Exception as e:
                st.error(f"Error processing {uploaded_file.name}: {e}")
        # Update progress regardless of processing status for user feedback
        progress_bar.progress((i + 1) / len(uploaded_files))
    # Add newly processed files to session state
    if newly_uploaded_paths:
        st.session_state.image_paths.extend(newly_uploaded_paths)
        if newly_uploaded_embeddings:
            new_embeddings_array = np.vstack(newly_uploaded_embeddings)
            if st.session_state.doc_embeddings is None or st.session_state.doc_embeddings.size == 0:
                st.session_state.doc_embeddings = new_embeddings_array
            else:
                st.session_state.doc_embeddings = np.vstack((st.session_state.doc_embeddings, new_embeddings_array))
            st.success(f"Successfully processed and added {len(newly_uploaded_paths)} new images.")
        else:
            st.warning("Failed to generate embeddings for newly uploaded images.")
elif uploaded_files:  # If files were selected but none were new
    st.info("Selected images already seem to be processed.")
# --- Vision RAG Section (Main UI) ---
st.markdown("---")
st.subheader("❓ Ask a Question")
if not st.session_state.image_paths:
    st.warning("Please load sample images or upload your own images first.")
else:
    st.info(f"Ready to answer questions about {len(st.session_state.image_paths)} images.")
    # Display thumbnails of all loaded images (optional)
    with st.expander("View Loaded Images", expanded=False):
        if st.session_state.image_paths:
            num_images_to_show = len(st.session_state.image_paths)
            cols = st.columns(5)  # Show 5 thumbnails per row
            for i in range(num_images_to_show):
                with cols[i % 5]:
                    # Add try-except for missing files during display
                    try:
                        # Display PDF pages differently? For now, just show the image
                        st.image(st.session_state.image_paths[i], width=100, caption=os.path.basename(st.session_state.image_paths[i]))
                    except FileNotFoundError:
                        st.error(f"Missing: {os.path.basename(st.session_state.image_paths[i])}")
        else:
            st.write("No images loaded yet.")
question = st.text_input("Ask a question about the loaded images:",
                         key="main_question_input",
                         placeholder="E.g., What is Nike's net profit?",
                         disabled=not st.session_state.image_paths)
# The button stays disabled until both API keys, a question, and embeddings exist.
run_button = st.button("Run Vision RAG", key="main_run_button",
                       disabled=not (cohere_api_key and google_api_key and question and st.session_state.image_paths and st.session_state.doc_embeddings is not None and st.session_state.doc_embeddings.size > 0))
# Output Area
st.markdown("### Results")
retrieved_image_placeholder = st.empty()
answer_placeholder = st.empty()
# Run search and answer logic
if run_button:
    if co and genai_client and st.session_state.doc_embeddings is not None and len(st.session_state.doc_embeddings) > 0:
        with st.spinner("Finding relevant image..."):
            # Ensure embeddings and paths match before search
            if len(st.session_state.image_paths) != st.session_state.doc_embeddings.shape[0]:
                st.error("Error: Mismatch between number of images and embeddings. Cannot proceed.")
            else:
                top_image_path = search(question, co, st.session_state.doc_embeddings, st.session_state.image_paths)
                if top_image_path:
                    caption = f"Retrieved content for: '{question}' (Source: {os.path.basename(top_image_path)})"
                    # Add source PDF name if it's a page image
                    if top_image_path.startswith("pdf_pages/"):
                        parts = top_image_path.split(os.sep)
                        if len(parts) >= 3:
                            pdf_name = parts[1]
                            page_name = parts[-1]
                            caption = f"Retrieved content for: '{question}' (Source: {pdf_name}.pdf, {page_name.replace('.png','')})"
                    retrieved_image_placeholder.image(top_image_path, caption=caption, use_container_width=True)
                    with st.spinner("Generating answer..."):
                        final_answer = answer(question, top_image_path, genai_client)
                        answer_placeholder.markdown(f"**Answer:**\n{final_answer}")
                else:
                    retrieved_image_placeholder.warning("Could not find a relevant image for your question.")
                    answer_placeholder.text("")  # Clear answer placeholder
    else:
        # This case should ideally be prevented by the disabled state of the button
        st.error("Cannot run RAG. Check API clients and ensure images are loaded with embeddings.")
# Footer
st.markdown("---")
st.caption("Vision RAG with Cohere Embed-4 | Built with Streamlit, Cohere Embed-4, and Google Gemini 2.5 Flash")
# === rag_tutorials/agentic_rag_gpt5/agentic_rag_gpt5.py ===
import streamlit as st
import os
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.vectordb.lancedb import LanceDb, SearchType
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Page configuration
st.set_page_config(
    page_title="Agentic RAG with GPT-5",
    page_icon="🧠",
    layout="wide"
)
# Main title and description
st.title("🧠 Agentic RAG with GPT-5")
st.markdown("""
This app demonstrates an intelligent AI agent that:
1. **Retrieves** relevant information from knowledge sources using LanceDB
2. **Answers** your questions clearly and concisely
Enter your OpenAI API key in the sidebar to get started!
""")
# Sidebar for API key and settings
with st.sidebar:
    st.header("🔧 Configuration")
    # OpenAI API Key (pre-filled from the environment when available)
    openai_key = st.text_input(
        "OpenAI API Key",
        type="password",
        value=os.getenv("OPENAI_API_KEY", ""),
        help="Get your key from https://platform.openai.com/"
    )
    # Add URLs to knowledge base
    st.subheader("🌐 Add Knowledge Sources")
    new_url = st.text_input(
        "Add URL",
        placeholder="https://www.theunwindai.com/p/mcp-vs-a2a-complementing-or-supplementing",
        help="Enter a URL to add to the knowledge base"
    )
    # Queue the URL in session state; ingestion happens in the main flow below.
    if st.button("➕ Add URL", type="primary"):
        if new_url:
            st.session_state.urls_to_add = new_url
            st.success(f"URL added to queue: {new_url}")
        else:
            st.error("Please enter a URL")
if openai_key:
# Initialize URLs in session state
if 'knowledge_urls' not in st.session_state:
st.session_state.knowledge_urls = ["https://www.theunwindai.com/p/mcp-vs-a2a-complementing-or-supplementing"] # Default URL
if 'urls_loaded' not in st.session_state:
st.session_state.urls_loaded = set()
# Initialize knowledge base (cached to avoid reloading)
@st.cache_resource(show_spinner="📚 Loading knowledge base...")
def load_knowledge() -> Knowledge:
"""Load and initialize the knowledge base with LanceDB"""
kb = Knowledge(
vector_db=LanceDb(
uri="tmp/lancedb",
table_name="agentic_rag_docs",
search_type=SearchType.vector, # Use vector search
embedder=OpenAIEmbedder(
api_key=openai_key
),
),
)
return kb
# Initialize agent (cached to avoid reloading)
@st.cache_resource(show_spinner="🤖 Loading agent...")
def load_agent(_kb: Knowledge) -> Agent:
"""Create an agent with reasoning capabilities"""
return Agent(
model=OpenAIChat(
id="gpt-5",
api_key=openai_key
),
knowledge=_kb,
search_knowledge=True, # Enable knowledge search
instructions=[
"Always search your knowledge before answering the question.",
"Provide clear, well-structured answers in markdown format.",
"Use proper markdown formatting with headers, lists, and emphasis where appropriate.",
"Structure your response with clear sections and bullet points when helpful.",
],
markdown=True, # Enable markdown formatting
)
    # Load knowledge and agent
    knowledge = load_knowledge()
    # Load initial URLs if any (only load once per URL)
    for url in st.session_state.knowledge_urls:
        if url not in st.session_state.urls_loaded:
            knowledge.add_content(url=url)
            st.session_state.urls_loaded.add(url)
    agent = load_agent(knowledge)
    # Display current URLs in knowledge base
    if st.session_state.knowledge_urls:
        st.sidebar.subheader("📚 Current Knowledge Sources")
        for i, url in enumerate(st.session_state.knowledge_urls, 1):
            st.sidebar.markdown(f"{i}. {url}")
    # Handle URL additions queued by the sidebar "Add URL" button.
    if hasattr(st.session_state, 'urls_to_add') and st.session_state.urls_to_add:
        new_url = st.session_state.urls_to_add
        if new_url not in st.session_state.knowledge_urls:
            st.session_state.knowledge_urls.append(new_url)
            with st.spinner("📥 Loading new documents..."):
                if new_url not in st.session_state.urls_loaded:
                    knowledge.add_content(url=new_url)
                    st.session_state.urls_loaded.add(new_url)
            st.success(f"✅ Added: {new_url}")
        # Clear the queue before rerunning so the URL is not re-processed.
        del st.session_state.urls_to_add
        st.rerun()
    # Main query section
    st.divider()
    st.subheader("🤔 Ask a Question")
    # Suggested prompts
    st.markdown("**Try these prompts:**")
    col1, col2, col3 = st.columns(3)
    with col1:
        if st.button("What is MCP?", use_container_width=True):
            st.session_state.query = "What is MCP (Model Context Protocol) and how does it work?"
    with col2:
        if st.button("MCP vs A2A", use_container_width=True):
            st.session_state.query = "How do MCP and A2A protocols differ, and are they complementary or competing?"
    with col3:
        if st.button("Agent Communication", use_container_width=True):
            st.session_state.query = "How do MCP and A2A work together in AI agent systems for communication and tool access?"
    # Query input
    query = st.text_area(
        "Your question:",
        value=st.session_state.get("query", "What is the difference between MCP and A2A protocols?"),
        height=100,
        help="Ask anything about the loaded knowledge sources"
    )
    # Run button
    if st.button("🚀 Get Answer", type="primary"):
        if query:
            # Create container for answer
            st.markdown("### 💡 Answer")
            answer_container = st.container()
            answer_placeholder = answer_container.empty()
            # Variables to accumulate content
            answer_text = ""
            # Stream the agent's response
            with st.spinner("🔍 Searching and generating answer..."):
                for chunk in agent.run(
                    query,
                    stream=True,  # Enable streaming
                ):
                    # Update answer display - show content from streaming chunks
                    if hasattr(chunk, 'content') and chunk.content and isinstance(chunk.content, str):
                        answer_text += chunk.content
                        answer_placeholder.markdown(
                            answer_text,
                            unsafe_allow_html=True
                        )
        else:
            st.error("Please enter a question")
else:
    # Show instructions if API key is missing (nothing else is rendered).
    st.info("""
    👋 **Welcome! To use this app, you need:**
    - **OpenAI API Key** (set it in the sidebar)
    - Sign up at [platform.openai.com](https://platform.openai.com/)
    - Generate a new API key
    Once you enter the key, the app will load the knowledge base and agent.
    """)
# Footer with explanation (static; shown regardless of API-key state)
st.divider()
with st.expander("📖 How This Works"):
    st.markdown("""
    **This app uses the Agno framework to create an intelligent Q&A system:**
    1. **Knowledge Loading**: URLs are processed and stored in LanceDB vector database
    2. **Vector Search**: Uses OpenAI's embeddings for semantic search to find relevant information
    3. **GPT-5**: OpenAI's GPT-5 model processes the information and generates answers
    **Key Components:**
    - `Knowledge`: Manages document loading from URLs
    - `LanceDb`: Vector database for efficient similarity search
    - `OpenAIEmbedder`: Converts text to embeddings using OpenAI's embedding model
    - `Agent`: Orchestrates everything to answer questions
    **Why LanceDB?**
    - Lightweight and easy to set up
    - No external database required
    - Fast vector search capabilities
    - Perfect for prototyping and small to medium-scale applications
    """)
# === end of agentic_rag_gpt5.py ===
# === rag_tutorials/local_hybrid_search_rag/local_main.py ===
import os
import logging
import streamlit as st
from raglite import RAGLiteConfig, insert_document, hybrid_search, retrieve_chunks, rerank_chunks, rag
from rerankers import Reranker
from typing import List, Dict, Any
from pathlib import Path
import time
import warnings
# Module-wide logging at INFO; logger named after this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Silence a noisy Streamlit/torch interop warning.
warnings.filterwarnings("ignore", message=".*torch.classes.*")
# System prompt for grounded answering: forces the LLM to use only the
# retrieved context while never mentioning that a context exists.
RAG_SYSTEM_PROMPT = """
You are a friendly and knowledgeable assistant that provides complete and insightful answers.
Answer the user's question using only the context below.
When responding, you MUST NOT reference the existence of the context, directly or indirectly.
Instead, you MUST treat the context as if its contents are entirely part of your working memory.
""".strip()
def initialize_config(settings: Dict[str, Any]) -> RAGLiteConfig:
    """Build a RAGLite configuration from user-supplied settings.

    Args:
        settings: Mapping with 'DBUrl', 'LLMPath' and 'EmbedderPath' entries.

    Returns:
        A RAGLiteConfig wired to local GGUF models with a FlashRank reranker.

    Raises:
        ValueError: If a setting is missing or the config cannot be built.
    """
    try:
        options = {
            "db_url": settings["DBUrl"],
            "llm": f"llama-cpp-python/{settings['LLMPath']}",
            "embedder": f"llama-cpp-python/{settings['EmbedderPath']}",
            # Normalized embeddings with modest chunks suit local models.
            "embedder_normalize": True,
            "chunk_max_size": 512,
            "reranker": Reranker("ms-marco-MiniLM-L-12-v2", model_type="flashrank"),
        }
        return RAGLiteConfig(**options)
    except Exception as e:
        raise ValueError(f"Configuration error: {e}")
def process_document(file_path: str) -> bool:
    """Ingest one document into the RAGLite store.

    Args:
        file_path: Location of the file to insert.

    Returns:
        True when ingestion succeeds; False on any failure (logged).
    """
    try:
        config = st.session_state.get('my_config')
        if not config:
            raise ValueError("Configuration not initialized")
        insert_document(Path(file_path), config=config)
        return True
    except Exception as e:
        logger.error(f"Error processing document: {str(e)}")
        return False
def perform_search(query: str) -> List[dict]:
    """Run hybrid retrieval for *query* and return reranked chunks.

    Args:
        query: The user's search string.

    Returns:
        Reranked chunks; [] when nothing matches or an error occurs (logged).
    """
    try:
        cfg = st.session_state.my_config
        chunk_ids, _scores = hybrid_search(query, num_results=10, config=cfg)
        if not chunk_ids:
            return []
        matched = retrieve_chunks(chunk_ids, config=cfg)
        return rerank_chunks(query, matched, config=cfg)
    except Exception as e:
        logger.error(f"Search error: {str(e)}")
        return []
def handle_fallback(query: str) -> str:
    """Answer *query* from the local LLM's general knowledge (no retrieval).

    Used when hybrid search finds nothing relevant. Returns a canned
    apology string when generation fails or yields only whitespace.
    """
    try:
        system_prompt = """You are a helpful AI assistant. When you don't know something,
        be honest about it. Provide clear, concise, and accurate responses."""
        stream = rag(
            prompt=query,
            system_prompt=system_prompt,
            search=None,
            messages=[],
            max_tokens=1024,
            temperature=0.7,
            config=st.session_state.my_config
        )
        answer = "".join(stream)
        if answer.strip():
            return answer
        return "I apologize, but I couldn't generate a response. Please try rephrasing your question."
    except Exception as e:
        logger.error(f"Fallback error: {str(e)}")
        return "I apologize, but I encountered an error while processing your request. Please try again."
def main():
    """Streamlit entry point: model configuration, PDF ingestion and RAG chat.

    The sidebar collects local GGUF model paths and a database URL; once a
    RAGLiteConfig is saved, PDFs can be uploaded and chatted over. Answers
    come from hybrid search + rerank, with a general-knowledge fallback
    when retrieval finds nothing relevant.
    """
    st.set_page_config(page_title="Local LLM-Powered Hybrid Search-RAG Assistant", layout="wide")
    # Seed session state: chat_history holds (user, assistant) pairs,
    # documents_loaded is a flag, my_config the saved RAGLiteConfig.
    for state_var in ['chat_history', 'documents_loaded', 'my_config']:
        if state_var not in st.session_state:
            st.session_state[state_var] = [] if state_var == 'chat_history' else False if state_var == 'documents_loaded' else None
    with st.sidebar:
        st.title("Configuration")
        llm_path = st.text_input(
            "LLM Model Path",
            value=st.session_state.get('llm_path', ''),
            placeholder="TheBloke/Llama-2-7B-Chat-GGUF/llama-2-7b-chat.Q4_K_M.gguf@4096",
            help="Path to your local LLM model in GGUF format"
        )
        embedder_path = st.text_input(
            "Embedder Model Path",
            value=st.session_state.get('embedder_path', ''),
            placeholder="lm-kit/bge-m3-gguf/bge-m3-Q4_K_M.gguf@1024",
            help="Path to your local embedding model in GGUF format"
        )
        db_url = st.text_input(
            "Database URL",
            value=st.session_state.get('db_url', ''),
            placeholder="postgresql://user:pass@host:port/db",
            help="Database connection URL"
        )
        if st.button("Save Configuration"):
            try:
                if not all([llm_path, embedder_path, db_url]):
                    st.error("All fields are required!")
                    return
                settings = {
                    "LLMPath": llm_path,
                    "EmbedderPath": embedder_path,
                    "DBUrl": db_url
                }
                st.session_state.my_config = initialize_config(settings)
                st.success("Configuration saved successfully!")
            except Exception as e:
                st.error(f"Configuration error: {str(e)}")
    st.title("🖥️ Local RAG App with Hybrid Search")
    if st.session_state.my_config:
        uploaded_files = st.file_uploader(
            "Upload PDF documents",
            type=["pdf"],
            accept_multiple_files=True,
            key="pdf_uploader"
        )
        if uploaded_files:
            success = False
            for uploaded_file in uploaded_files:
                with st.spinner(f"Processing {uploaded_file.name}..."):
                    # insert_document needs a real path, so spool the upload
                    # to a temp file and remove it afterwards.
                    temp_path = f"temp_{uploaded_file.name}"
                    with open(temp_path, "wb") as f:
                        f.write(uploaded_file.getvalue())
                    if process_document(temp_path):
                        st.success(f"Successfully processed: {uploaded_file.name}")
                        success = True
                    else:
                        st.error(f"Failed to process: {uploaded_file.name}")
                    os.remove(temp_path)
            if success:
                st.session_state.documents_loaded = True
                st.success("Documents are ready! You can now ask questions about them.")
    if st.session_state.documents_loaded:
        # Replay the conversation so far.
        for msg in st.session_state.chat_history:
            with st.chat_message("user"): st.write(msg[0])
            with st.chat_message("assistant"): st.write(msg[1])
        user_input = st.chat_input("Ask a question about the documents...")
        if user_input:
            with st.chat_message("user"): st.write(user_input)
            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                try:
                    reranked_chunks = perform_search(query=user_input)
                    if not reranked_chunks:
                        logger.info("No relevant documents found. Falling back to local LLM.")
                        with st.spinner("Using general knowledge to answer..."):
                            full_response = handle_fallback(user_input)
                            if full_response.startswith("I apologize"):
                                st.warning("No relevant documents found and fallback failed.")
                            else:
                                st.info("Answering from general knowledge.")
                    else:
                        # Flatten (user, assistant) pairs into role-tagged messages.
                        formatted_messages = [
                            {"role": "user" if i % 2 == 0 else "assistant", "content": msg}
                            for i, msg in enumerate([m for pair in st.session_state.chat_history for m in pair])
                            if msg
                        ]
                        response_stream = rag(
                            prompt=user_input,
                            system_prompt=RAG_SYSTEM_PROMPT,
                            search=hybrid_search,
                            messages=formatted_messages,
                            max_contexts=5,
                            config=st.session_state.my_config
                        )
                        full_response = ""
                        for chunk in response_stream:
                            full_response += chunk
                            message_placeholder.markdown(full_response + "▌")
                    # Bug fix: render and record the answer for BOTH branches.
                    # These two lines were previously indented inside the else
                    # branch, so fallback answers were never displayed or saved
                    # (compare hybrid_search_rag/main.py, where they are dedented).
                    message_placeholder.markdown(full_response)
                    st.session_state.chat_history.append((user_input, full_response))
                except Exception as e:
                    logger.error(f"Error: {str(e)}")
                    st.error(f"Error: {str(e)}")
    else:
        st.info(
            "Please configure your model paths and upload documents to get started."
            if not st.session_state.my_config
            else "Please upload some documents to get started."
        )
if __name__ == "__main__":
    main()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from uuid import uuid4
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.tools.retriever import create_retriever_tool
from typing import Annotated, Literal, Sequence
from typing_extensions import TypedDict
from functools import partial
from langchain import hub
from langchain_core.messages import BaseMessage, HumanMessage
from langgraph.graph.message import add_messages
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import BaseModel, Field
from langgraph.graph import END, StateGraph, START
from langgraph.prebuilt import ToolNode, tools_condition
import streamlit as st
# Page chrome must be configured before any other Streamlit call.
st.set_page_config(page_title="AI Blog Search", page_icon=":mag_right:")
st.header(":blue[Agentic RAG with LangGraph:] :green[AI Blog Search]")
# Initialize session state variables if they don't exist, so reruns keep
# whatever credentials the user already entered.
if 'qdrant_host' not in st.session_state:
    st.session_state.qdrant_host = ""
if 'qdrant_api_key' not in st.session_state:
    st.session_state.qdrant_api_key = ""
if 'gemini_api_key' not in st.session_state:
    st.session_state.gemini_api_key = ""
def set_sidebar():
    """Render the sidebar inputs that collect Qdrant and Gemini credentials.

    On "Done", complete values are persisted into st.session_state;
    incomplete input only triggers a warning.
    """
    with st.sidebar:
        st.subheader("API Configuration")
        host = st.text_input("Enter your Qdrant Host URL:", type="password")
        qdrant_key = st.text_input("Enter your Qdrant API key:", type="password")
        gemini_key = st.text_input("Enter your Gemini API key:", type="password")
        if not st.button("Done"):
            return
        if all([host, qdrant_key, gemini_key]):
            st.session_state.qdrant_host = host
            st.session_state.qdrant_api_key = qdrant_key
            st.session_state.gemini_api_key = gemini_key
            st.success("API keys saved!")
        else:
            st.warning("Please fill all API fields")
def initialize_components():
    """Create the embedding model, Qdrant client and vector store.

    Returns:
        (embedding_model, client, db) on success, or (None, None, None)
        when credentials are missing or initialization fails.
    """
    credentials = (st.session_state.qdrant_host,
                   st.session_state.qdrant_api_key,
                   st.session_state.gemini_api_key)
    if not all(credentials):
        return None, None, None
    try:
        # Gemini embedder used for both indexing and query vectors.
        embedder = GoogleGenerativeAIEmbeddings(
            model="models/embedding-001",
            google_api_key=st.session_state.gemini_api_key
        )
        qdrant = QdrantClient(
            st.session_state.qdrant_host,
            api_key=st.session_state.qdrant_api_key
        )
        store = QdrantVectorStore(
            client=qdrant,
            collection_name="qdrant_db",
            embedding=embedder
        )
        return embedder, qdrant, store
    except Exception as e:
        st.error(f"Initialization error: {str(e)}")
        return None, None, None
class AgentState(TypedDict):
    """State carried between LangGraph nodes."""

    # add_messages is a reducer: node return values are appended to the
    # message history rather than replacing it.
    messages: Annotated[Sequence[BaseMessage], add_messages]
# Edges
## Check Relevance
def grade_documents(state) -> Literal["generate", "rewrite"]:
    """
    Determines whether the retrieved documents are relevant to the question.

    Uses a structured-output Gemini call that must produce a binary
    'yes'/'no' relevance score for the retrieved context.

    Args:
        state (messages): The current state; the first message holds the
            user question and the last one the retrieved documents.

    Returns:
        str: "generate" when the documents are relevant, "rewrite" otherwise.
    """
    print("---CHECK RELEVANCE---")
    # Data model the LLM must fill in (validated by pydantic).
    class grade(BaseModel):
        """Binary score for relevance check."""
        binary_score: str = Field(description="Relevance score 'yes' or 'no'")
    # LLM
    model = ChatGoogleGenerativeAI(api_key=st.session_state.gemini_api_key, temperature=0, model="gemini-2.0-flash", streaming=True)
    # LLM constrained to emit the `grade` schema.
    llm_with_tool = model.with_structured_output(grade)
    # Prompt
    prompt = PromptTemplate(
        template="""You are a grader assessing relevance of a retrieved document to a user question. \n
        Here is the retrieved document: \n\n {context} \n\n
        Here is the user question: {question} \n
        If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n
        Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.""",
        input_variables=["context", "question"],
    )
    # Chain
    chain = prompt | llm_with_tool
    messages = state["messages"]
    last_message = messages[-1]
    question = messages[0].content
    docs = last_message.content
    scored_result = chain.invoke({"question": question, "context": docs})
    score = scored_result.binary_score
    # Route: relevant docs go to answer generation; otherwise the
    # question is rewritten and the agent tries again.
    if score == "yes":
        print("---DECISION: DOCS RELEVANT---")
        return "generate"
    else:
        print("---DECISION: DOCS NOT RELEVANT---")
        print(score)
        return "rewrite"
# Nodes
## agent node
def agent(state, tools):
    """Decide whether to call the retriever tool or answer directly.

    Args:
        state: Current graph state holding the message history.
        tools: Tools the model may invoke (the retriever tool).

    Returns:
        dict: State update whose single new message is the model response.
    """
    print("---CALL AGENT---")
    llm = ChatGoogleGenerativeAI(
        api_key=st.session_state.gemini_api_key,
        temperature=0,
        streaming=True,
        model="gemini-2.0-flash",
    ).bind_tools(tools)
    reply = llm.invoke(state["messages"])
    # Returned as a list so the add_messages reducer appends it.
    return {"messages": [reply]}
## rewrite node
def rewrite(state):
    """
    Transform the query to produce a better question.

    Asks Gemini to restate the original question with clearer semantic
    intent; the rewritten question is fed back to the agent node.

    Args:
        state (messages): The current state; the first message holds the
            original user question.

    Returns:
        dict: The updated state with re-phrased question
    """
    print("---TRANSFORM QUERY---")
    messages = state["messages"]
    question = messages[0].content
    msg = [
        HumanMessage(
            content=f""" \n
    Look at the input and try to reason about the underlying semantic intent / meaning. \n
    Here is the initial question:
    \n ------- \n
    {question}
    \n ------- \n
    Formulate an improved question: """,
        )
    ]
    # Rewriting model (temperature 0 for a deterministic rephrase).
    model = ChatGoogleGenerativeAI(api_key=st.session_state.gemini_api_key, temperature=0, model="gemini-2.0-flash", streaming=True)
    response = model.invoke(msg)
    return {"messages": [response]}
## generate node
def generate(state):
    """Produce the final answer from the retrieved context.

    Args:
        state: Current graph state; the first message is the user question
            and the last message carries the retrieved documents.

    Returns:
        dict: State update containing the generated answer string.
    """
    print("---GENERATE---")
    history = state["messages"]
    question = history[0].content
    context = history[-1].content
    # Standard RAG prompt from the LangChain hub, piped into Gemini and
    # parsed down to a plain string.
    chain = (
        hub.pull("rlm/rag-prompt")
        | ChatGoogleGenerativeAI(api_key=st.session_state.gemini_api_key, model="gemini-2.0-flash", temperature=0, streaming=True)
        | StrOutputParser()
    )
    answer = chain.invoke({"context": context, "question": question})
    return {"messages": [answer]}
# graph function
def get_graph(retriever_tool):
    """Compile the agentic-RAG LangGraph workflow.

    Topology: agent -> (retrieve | END); retrieve -> (generate | rewrite)
    via grade_documents; rewrite loops back to agent; generate ends the run.

    Args:
        retriever_tool: Tool the agent may call to fetch blog chunks.

    Returns:
        A compiled graph ready for .stream() / .invoke().
    """
    tools = [retriever_tool]
    builder = StateGraph(AgentState)
    # Nodes.
    builder.add_node("agent", partial(agent, tools=tools))
    builder.add_node("retrieve", ToolNode(tools))
    builder.add_node("rewrite", rewrite)
    builder.add_node("generate", generate)
    # The agent runs first and decides whether to retrieve.
    builder.add_edge(START, "agent")
    builder.add_conditional_edges(
        "agent",
        tools_condition,
        {"tools": "retrieve", END: END},
    )
    # After retrieval, grade the documents to pick generate vs rewrite.
    builder.add_conditional_edges("retrieve", grade_documents)
    builder.add_edge("generate", END)
    builder.add_edge("rewrite", "agent")
    return builder.compile()
def generate_message(graph, inputs):
    """Stream *graph* with *inputs* and return the last generated answer.

    Iterates every streamed state update and keeps the first message
    emitted by the "generate" node (the final answer). Returns "" when
    the graph never reaches the generate node or it emits no messages.

    Args:
        graph: Compiled LangGraph exposing .stream().
        inputs: Initial state mapping passed to the graph.

    Returns:
        The generated answer, or "" if none was produced.
    """
    generated_message = ""
    for output in graph.stream(inputs):
        for key, value in output.items():
            if key == "generate" and isinstance(value, dict):
                # Guard against a present-but-empty messages list, which
                # previously raised IndexError.
                messages = value.get("messages") or [""]
                generated_message = messages[0]
    return generated_message
def add_documents_to_qdrant(url, db):
    """Load a blog page, chunk it, and index the chunks in Qdrant.

    Args:
        url: Web page to ingest.
        db: QdrantVectorStore that receives the chunks.

    Returns:
        True on success; False when loading or indexing fails
        (the error is surfaced in the Streamlit UI).
    """
    try:
        pages = WebBaseLoader(url).load()
        splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
            chunk_size=100, chunk_overlap=50
        )
        chunks = splitter.split_documents(pages)
        # One random UUID per chunk as the point id.
        ids = [str(uuid4()) for _ in chunks]
        db.add_documents(documents=chunks, ids=ids)
        return True
    except Exception as e:
        st.error(f"Error adding documents: {str(e)}")
        return False
def main():
    """Streamlit entry point: credential checks, blog ingestion and query UI."""
    set_sidebar()
    # Check if API keys are set
    if not all([st.session_state.qdrant_host,
               st.session_state.qdrant_api_key,
               st.session_state.gemini_api_key]):
        st.warning("Please configure your API keys in the sidebar first")
        return
    # Initialize embedder, Qdrant client and vector store; bail out on failure.
    embedding_model, client, db = initialize_components()
    if not all([embedding_model, client, db]):
        return
    # Initialize retriever and tools
    retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 5})
    retriever_tool = create_retriever_tool(
        retriever,
        "retrieve_blog_posts",
        "Search and return information about blog posts on LLMs, LLM agents, prompt engineering, and adversarial attacks on LLMs.",
    )
    # NOTE(review): `tools` is unused here — get_graph builds its own list
    # from retriever_tool; confirm before removing.
    tools = [retriever_tool]
    # URL input section
    url = st.text_input(
        ":link: Paste the blog link:",
        placeholder="e.g., https://lilianweng.github.io/posts/2023-06-23-agent/"
    )
    if st.button("Enter URL"):
        if url:
            with st.spinner("Processing documents..."):
                if add_documents_to_qdrant(url, db):
                    st.success("Documents added successfully!")
                else:
                    st.error("Failed to add documents")
        else:
            st.warning("Please enter a URL")
    # Query section: the graph is rebuilt on every rerun (wiring only, cheap).
    graph = get_graph(retriever_tool)
    query = st.text_area(
        ":bulb: Enter your query about the blog post:",
        placeholder="e.g., What does Lilian Weng say about the types of agent memory?"
    )
    if st.button("Submit Query"):
        if not query:
            st.warning("Please enter a query")
            return
        inputs = {"messages": [HumanMessage(content=query)]}
        with st.spinner("Generating response..."):
            try:
                response = generate_message(graph, inputs)
                st.write(response)
            except Exception as e:
                st.error(f"Error generating response: {str(e)}")
    st.markdown("---")
    st.write("Built with :blue-background[LangChain] | :blue-background[LangGraph] by [Charan](https://www.linkedin.com/in/codewithcharan/)")
if __name__ == "__main__":
    main()
import os
import logging
import streamlit as st
from raglite import RAGLiteConfig, insert_document, hybrid_search, retrieve_chunks, rerank_chunks, rag
from rerankers import Reranker
from typing import List
from pathlib import Path
import anthropic
import time
import warnings
# Module-wide logging at INFO; logger named after this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Silence a noisy Streamlit/torch interop warning.
warnings.filterwarnings("ignore", message=".*torch.classes.*")
# System prompt for grounded answering: forces the LLM to use only the
# retrieved context while never mentioning that a context exists.
RAG_SYSTEM_PROMPT = """
You are a friendly and knowledgeable assistant that provides complete and insightful answers.
Answer the user's question using only the context below.
When responding, you MUST NOT reference the existence of the context, directly or indirectly.
Instead, you MUST treat the context as if its contents are entirely part of your working memory.
""".strip()
def initialize_config(openai_key: str, anthropic_key: str, cohere_key: str, db_url: str) -> RAGLiteConfig:
    """Export the provider API keys and build the RAGLite configuration.

    Args:
        openai_key: OpenAI key (embeddings).
        anthropic_key: Anthropic key (Claude LLM).
        cohere_key: Cohere key (reranker).
        db_url: Database connection URL.

    Returns:
        RAGLiteConfig using Claude 3 Opus, OpenAI embeddings and a Cohere
        reranker.

    Raises:
        ValueError: If the configuration cannot be constructed.
    """
    try:
        # Downstream libraries read these keys from the environment.
        os.environ.update({
            "OPENAI_API_KEY": openai_key,
            "ANTHROPIC_API_KEY": anthropic_key,
            "COHERE_API_KEY": cohere_key,
        })
        return RAGLiteConfig(
            db_url=db_url,
            llm="claude-3-opus-20240229",
            embedder="text-embedding-3-large",
            embedder_normalize=True,
            chunk_max_size=2000,
            embedder_sentence_window_size=2,
            reranker=Reranker("cohere", api_key=cohere_key, lang="en")
        )
    except Exception as e:
        raise ValueError(f"Configuration error: {e}")
def process_document(file_path: str) -> bool:
    """Ingest one document into RAGLite using the saved configuration.

    Args:
        file_path: Path of the document to insert.

    Returns:
        True if insertion succeeded; otherwise False (the error is logged).
    """
    try:
        cfg = st.session_state.get('my_config')
        if not cfg:
            raise ValueError("Configuration not initialized")
        insert_document(Path(file_path), config=cfg)
    except Exception as e:
        logger.error(f"Error processing document: {str(e)}")
        return False
    return True
def perform_search(query: str) -> List[dict]:
    """Hybrid-search the index for *query* and rerank the hits.

    Args:
        query: The user's search string.

    Returns:
        Reranked chunks for the query; [] when there are no hits or an
        error occurs (logged).
    """
    try:
        cfg = st.session_state.my_config
        chunk_ids, _ = hybrid_search(query, num_results=10, config=cfg)
        if not chunk_ids:
            return []
        hits = retrieve_chunks(chunk_ids, config=cfg)
        return rerank_chunks(query, hits, config=cfg)
    except Exception as e:
        logger.error(f"Search error: {str(e)}")
        return []
def handle_fallback(query: str) -> str:
    """Answer *query* directly with Claude when retrieval finds nothing.

    Returns a canned apology string when the API call fails (the error is
    logged and surfaced in the UI).
    """
    try:
        system_prompt = """You are a helpful AI assistant. When you don't know something,
        be honest about it. Provide clear, concise, and accurate responses. If the question
        is not related to any specific document, use your general knowledge to answer."""
        api = anthropic.Anthropic(api_key=st.session_state.user_env["ANTHROPIC_API_KEY"])
        reply = api.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1024,
            system=system_prompt,
            messages=[{"role": "user", "content": query}],
            temperature=0.7
        )
        return reply.content[0].text
    except Exception as e:
        logger.error(f"Fallback error: {str(e)}")
        st.error(f"Fallback error: {str(e)}")  # Surface the failure in the UI too
        return "I apologize, but I encountered an error while processing your request. Please try again."
def main():
    """Streamlit entry point: API-key sidebar, PDF upload and RAG chat loop."""
    st.set_page_config(page_title="LLM-Powered Hybrid Search-RAG Assistant", layout="wide")
    # Seed session state: chat_history list, documents_loaded flag,
    # my_config (RAGLiteConfig) and user_env (keys for the fallback path).
    for state_var in ['chat_history', 'documents_loaded', 'my_config', 'user_env']:
        if state_var not in st.session_state:
            st.session_state[state_var] = [] if state_var == 'chat_history' else False if state_var == 'documents_loaded' else None if state_var == 'my_config' else {}
    with st.sidebar:
        st.title("Configuration")
        openai_key = st.text_input("OpenAI API Key", value=st.session_state.get('openai_key', ''), type="password", placeholder="sk-...")
        anthropic_key = st.text_input("Anthropic API Key", value=st.session_state.get('anthropic_key', ''), type="password", placeholder="sk-ant-...")
        cohere_key = st.text_input("Cohere API Key", value=st.session_state.get('cohere_key', ''), type="password", placeholder="Enter Cohere key")
        db_url = st.text_input("Database URL", value=st.session_state.get('db_url', 'sqlite:///raglite.sqlite'), placeholder="sqlite:///raglite.sqlite")
        if st.button("Save Configuration"):
            try:
                if not all([openai_key, anthropic_key, cohere_key, db_url]):
                    st.error("All fields are required!")
                    return
                # Persist raw inputs so the widgets keep their values on rerun.
                for key, value in {'openai_key': openai_key, 'anthropic_key': anthropic_key, 'cohere_key': cohere_key, 'db_url': db_url}.items():
                    st.session_state[key] = value
                st.session_state.my_config = initialize_config(openai_key=openai_key, anthropic_key=anthropic_key, cohere_key=cohere_key, db_url=db_url)
                st.session_state.user_env = {"ANTHROPIC_API_KEY": anthropic_key}
                st.success("Configuration saved successfully!")
            except Exception as e:
                st.error(f"Configuration error: {str(e)}")
    st.title("👀 RAG App with Hybrid Search")
    if st.session_state.my_config:
        uploaded_files = st.file_uploader("Upload PDF documents", type=["pdf"], accept_multiple_files=True, key="pdf_uploader")
        if uploaded_files:
            success = False
            for uploaded_file in uploaded_files:
                with st.spinner(f"Processing {uploaded_file.name}..."):
                    # insert_document needs a real path, so spool to a temp file.
                    temp_path = f"temp_{uploaded_file.name}"
                    with open(temp_path, "wb") as f:
                        f.write(uploaded_file.getvalue())
                    if process_document(temp_path):
                        st.success(f"Successfully processed: {uploaded_file.name}")
                        success = True
                    else:
                        st.error(f"Failed to process: {uploaded_file.name}")
                    os.remove(temp_path)
            if success:
                st.session_state.documents_loaded = True
                st.success("Documents are ready! You can now ask questions about them.")
    if st.session_state.documents_loaded:
        # Replay the conversation so far.
        for msg in st.session_state.chat_history:
            with st.chat_message("user"): st.write(msg[0])
            with st.chat_message("assistant"): st.write(msg[1])
        user_input = st.chat_input("Ask a question about the documents...")
        if user_input:
            with st.chat_message("user"): st.write(user_input)
            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                try:
                    reranked_chunks = perform_search(query=user_input)
                    if not reranked_chunks or len(reranked_chunks) == 0:
                        # Nothing relevant in the index: answer from Claude directly.
                        logger.info("No relevant documents found. Falling back to Claude.")
                        st.info("No relevant documents found. Using general knowledge to answer.")
                        full_response = handle_fallback(user_input)
                    else:
                        # Flatten (user, assistant) pairs into role-tagged messages.
                        formatted_messages = [{"role": "user" if i % 2 == 0 else "assistant", "content": msg}
                                            for i, msg in enumerate([m for pair in st.session_state.chat_history for m in pair]) if msg]
                        response_stream = rag(prompt=user_input,
                                            system_prompt=RAG_SYSTEM_PROMPT,
                                            search=hybrid_search,
                                            messages=formatted_messages,
                                            max_contexts=5,
                                            config=st.session_state.my_config)
                        full_response = ""
                        for chunk in response_stream:
                            full_response += chunk
                            message_placeholder.markdown(full_response + "▌")
                    # Shared by both branches: show the answer and save the turn.
                    message_placeholder.markdown(full_response)
                    st.session_state.chat_history.append((user_input, full_response))
                except Exception as e:
                    st.error(f"Error: {str(e)}")
    else:
        st.info("Please configure your API keys and upload documents to get started." if not st.session_state.my_config else "Please upload some documents to get started.")
if __name__ == "__main__":
    main()
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/rag_tutorials/corrective_rag/corrective_rag.py | rag_tutorials/corrective_rag/corrective_rag.py | from langchain import hub
from langchain.output_parsers import PydanticOutputParser
from langchain_core.output_parsers import StrOutputParser
from langchain.schema import Document
from pydantic import BaseModel, Field
import streamlit as st
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader, TextLoader, WebBaseLoader
from langchain_community.tools import TavilySearchResults
from langchain_community.vectorstores import Qdrant
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.messages import HumanMessage
from langgraph.graph import END, StateGraph
from typing import Dict, TypedDict
from langchain_core.prompts import PromptTemplate
import pprint
import yaml
import nest_asyncio
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
import tempfile
import os
from langchain_anthropic import ChatAnthropic
from tenacity import retry, stop_after_attempt, wait_exponential
# Allow nested event loops (Streamlit reruns + async document loaders).
nest_asyncio.apply()
# Set at module level once documents are loaded and indexed below.
retriever = None
def initialize_session_state():
    """Seed st.session_state with default credentials and URLs (first run only)."""
    if 'initialized' in st.session_state:
        return
    defaults = {
        'initialized': False,
        'anthropic_api_key': "",
        'openai_api_key': "",
        'tavily_api_key': "",
        'qdrant_api_key': "",
        'qdrant_url': "http://localhost:6333",
        'doc_url': "https://arxiv.org/pdf/2307.09288.pdf",
    }
    for key, value in defaults.items():
        st.session_state[key] = value
def setup_sidebar():
    """Render the API-configuration sidebar and halt until keys are provided.

    Stops the Streamlit script when the OpenAI/Anthropic keys or the
    Qdrant URL are missing; otherwise marks the app as initialized.
    """
    with st.sidebar:
        st.subheader("API Configuration")
        st.session_state.anthropic_api_key = st.text_input("Anthropic API Key", value=st.session_state.anthropic_api_key, type="password", help="Required for Claude 3 model")
        st.session_state.openai_api_key = st.text_input("OpenAI API Key", value=st.session_state.openai_api_key, type="password")
        st.session_state.tavily_api_key = st.text_input("Tavily API Key", value=st.session_state.tavily_api_key, type="password")
        st.session_state.qdrant_url = st.text_input("Qdrant URL", value=st.session_state.qdrant_url)
        st.session_state.qdrant_api_key = st.text_input("Qdrant API Key", value=st.session_state.qdrant_api_key, type="password")
        st.session_state.doc_url = st.text_input("Document URL", value=st.session_state.doc_url)
        required = (st.session_state.openai_api_key,
                    st.session_state.anthropic_api_key,
                    st.session_state.qdrant_url)
        if not all(required):
            st.warning("Please provide the required API keys and URLs")
            st.stop()
        st.session_state.initialized = True
# Bootstrap: seed defaults, then draw the sidebar (which st.stop()s the
# script when required keys are missing).
initialize_session_state()
setup_sidebar()
# Convenience aliases for the validated session-state credentials.
openai_api_key = st.session_state.openai_api_key
tavily_api_key = st.session_state.tavily_api_key
anthropic_api_key = st.session_state.anthropic_api_key
# Embeddings client used for indexing and querying the vector store.
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-small",
    api_key=st.session_state.openai_api_key
)
# Qdrant client backing retrieval.
client = QdrantClient(
    url=st.session_state.qdrant_url,
    api_key=st.session_state.qdrant_api_key
)
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
def execute_tavily_search(tool, query):
    """Invoke the Tavily search tool, retrying transient failures.

    Retries up to 3 attempts with exponential backoff (4-10s waits)
    because the remote search API can fail transiently.
    """
    return tool.invoke({"query": query})
def web_search(state):
    """Web search based on the re-phrased question using Tavily API.

    Appends a single Document summarizing up to 3 Tavily results to the
    state's document list. Degrades gracefully (with a UI notice) when no
    Tavily key is configured, the search fails after retries, or no
    results come back.

    Args:
        state: Graph state whose "keys" dict holds "question" and "documents".

    Returns:
        dict: Updated state with the web results appended to "documents".
    """
    print("~-web search-~")
    state_dict = state["keys"]
    question = state_dict["question"]
    documents = state_dict["documents"]
    # Create progress placeholder
    progress_placeholder = st.empty()
    progress_placeholder.info("Initiating web search...")
    try:
        # Validate Tavily API key
        if not st.session_state.tavily_api_key:
            progress_placeholder.warning("Tavily API key not provided - skipping web search")
            return {"keys": {"documents": documents, "question": question}}
        progress_placeholder.info("Configuring search tool...")
        # Initialize Tavily search tool
        tool = TavilySearchResults(
            api_key=st.session_state.tavily_api_key,
            max_results=3,
            search_depth="advanced"
        )
        # Execute search with retry logic
        progress_placeholder.info("Executing search query...")
        try:
            search_results = execute_tavily_search(tool, question)
        except Exception as search_error:
            progress_placeholder.error(f"Search failed after retries: {str(search_error)}")
            return {"keys": {"documents": documents, "question": question}}
        if not search_results:
            progress_placeholder.warning("No search results found")
            return {"keys": {"documents": documents, "question": question}}
        # Process results
        progress_placeholder.info("Processing search results...")
        web_results = []
        for result in search_results:
            # Extract and format relevant information
            content = (
                f"Title: {result.get('title', 'No title')}\n"
                f"Content: {result.get('content', 'No content')}\n"
            )
            web_results.append(content)
        # All results are merged into one Document so downstream nodes see
        # a single web-search context entry.
        web_document = Document(
            page_content="\n\n".join(web_results),
            metadata={
                "source": "tavily_search",
                "query": question,
                "result_count": len(web_results)
            }
        )
        documents.append(web_document)
        progress_placeholder.success(f"Successfully added {len(web_results)} search results")
    except Exception as error:
        error_msg = f"Web search error: {str(error)}"
        print(error_msg)
        progress_placeholder.error(error_msg)
    finally:
        # Always clear the transient progress widget.
        progress_placeholder.empty()
    return {"keys": {"documents": documents, "question": question}}
def load_documents(file_or_url: str, is_url: bool = True) -> list:
    """Load a document from a web URL or a local file path.

    Args:
        file_or_url: URL or filesystem path of the document.
        is_url: When True, fetch via WebBaseLoader; otherwise choose a
            loader from the file extension (.pdf, .txt, .md).

    Returns:
        The loaded document list, or an empty list on failure (the error
        is surfaced in the Streamlit UI instead of raised).
    """
    try:
        if is_url:
            doc_loader = WebBaseLoader(file_or_url)
            # Throttle requests to be polite to the target server.
            doc_loader.requests_per_second = 1
        else:
            ext = os.path.splitext(file_or_url)[1].lower()
            if ext == '.pdf':
                doc_loader = PyPDFLoader(file_or_url)
            elif ext in ('.txt', '.md'):
                doc_loader = TextLoader(file_or_url)
            else:
                raise ValueError(f"Unsupported file type: {ext}")
        return doc_loader.load()
    except Exception as e:
        st.error(f"Error loading document: {str(e)}")
        return []
# --- Document ingestion: collect input, chunk it, and index it in Qdrant ---
st.subheader("Document Input")
input_option = st.radio("Choose input method:", ["URL", "File Upload"])
docs = None
if input_option == "URL":
    url = st.text_input("Enter document URL:", value=st.session_state.doc_url)
    if url:
        docs = load_documents(url, is_url=True)
else:
    uploaded_file = st.file_uploader("Upload a document", type=['pdf', 'txt', 'md'])
    if uploaded_file:
        # Create a temporary file to store the upload (loaders need a path)
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_file.name)[1]) as tmp_file:
            tmp_file.write(uploaded_file.getvalue())
            docs = load_documents(tmp_file.name, is_url=False)
        # Clean up the temporary file
        os.unlink(tmp_file.name)
if docs:
    # Token-based chunking keeps each chunk within model context limits.
    text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=500, chunk_overlap=100
    )
    all_splits = text_splitter.split_documents(docs)
    client = QdrantClient(url=st.session_state.qdrant_url, api_key=st.session_state.qdrant_api_key)
    collection_name = "rag-qdrant"
    try:
        # Drop any previous collection so each run indexes from scratch.
        client.delete_collection(collection_name)
    except Exception:
        pass
    # NOTE(review): size=1536 assumes the `embeddings` object (configured
    # elsewhere in this file) emits 1536-dim vectors — confirm.
    client.create_collection(
        collection_name=collection_name,
        vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
    )
    # Create vectorstore backed by the fresh collection
    vectorstore = Qdrant(
        client=client,
        collection_name=collection_name,
        embeddings=embeddings,
    )
    # Add documents to the vectorstore
    vectorstore.add_documents(all_splits)
    retriever = vectorstore.as_retriever()
class GraphState(TypedDict):
    """Shared state passed between LangGraph nodes.

    The single "keys" entry holds a dict that, depending on the node,
    contains "question", "documents", "run_web_search", and "generation".
    """
    # NOTE(review): `any` here is the builtin function, not typing.Any —
    # harmless at runtime for a TypedDict, but should be typing.Any.
    keys: Dict[str, any]
def retrieve(state):
    """Fetch documents relevant to the current question from the retriever."""
    print("~-retrieve-~")
    question = state["keys"]["question"]
    # No index built yet (e.g. no document was loaded): return nothing.
    if retriever is None:
        return {"keys": {"documents": [], "question": question}}
    relevant = retriever.get_relevant_documents(question)
    return {"keys": {"documents": relevant, "question": question}}
def generate(state):
    """Generate the final answer with the Anthropic Claude model, grounded
    in the retrieved (and possibly web-augmented) documents."""
    print("~-generate-~")
    state_dict = state["keys"]
    question, documents = state_dict["question"], state_dict["documents"]
    try:
        # Prompt that grounds the answer in the retrieved context.
        prompt = PromptTemplate(template="""Based on the following context, please answer the question.
Context: {context}
Question: {question}
Answer:""", input_variables=["context", "question"])
        llm = ChatAnthropic(model="claude-sonnet-4-5", api_key=st.session_state.anthropic_api_key,
                            temperature=0, max_tokens=1000)
        context = "\n\n".join(doc.page_content for doc in documents)
        # Create and run chain. The lambdas close over the local context
        # and question, so the chain is invoked with an empty input dict.
        rag_chain = (
            {"context": lambda x: context, "question": lambda x: question}
            | prompt
            | llm
            | StrOutputParser()
        )
        generation = rag_chain.invoke({})
        return {
            "keys": {
                "documents": documents,
                "question": question,
                "generation": generation
            }
        }
    except Exception as e:
        error_msg = f"Error in generate function: {str(e)}"
        print(error_msg)
        st.error(error_msg)
        # Fall back to a canned apology so downstream display still works.
        return {"keys": {"documents": documents, "question": question,
                         "generation": "Sorry, I encountered an error while generating the response."}}
def grade_documents(state):
    """Determine which retrieved documents are relevant to the question.

    Each document is graded by the LLM as "yes"/"no". Irrelevant documents
    are dropped and the "run_web_search" flag is set to "Yes" so the graph
    can fall back to a web search.

    Args:
        state: Graph state whose "keys" dict contains "question" and
            "documents".

    Returns:
        Updated state with filtered "documents" and a "run_web_search"
        flag ("Yes"/"No").
    """
    # Fix: these were re-imported inside the per-document loop on every
    # iteration; hoisted to the function top.
    import json
    import re
    print("~-check relevance-~")
    state_dict = state["keys"]
    question = state_dict["question"]
    documents = state_dict["documents"]
    llm = ChatAnthropic(model="claude-sonnet-4-5", api_key=st.session_state.anthropic_api_key,
                        temperature=0, max_tokens=1000)
    prompt = PromptTemplate(template="""You are grading the relevance of a retrieved document to a user question.
Return ONLY a JSON object with a "score" field that is either "yes" or "no".
Do not include any other text or explanation.
Document: {context}
Question: {question}
Rules:
- Check for related keywords or semantic meaning
- Use lenient grading to only filter clear mismatches
- Return exactly like this example: {{"score": "yes"}} or {{"score": "no"}}""",
                            input_variables=["context", "question"])
    chain = (
        prompt
        | llm
        | StrOutputParser()
    )
    filtered_docs = []
    search = "No"
    for d in documents:
        try:
            response = chain.invoke({"question": question, "context": d.page_content})
            # The model sometimes wraps the JSON in extra text; extract the
            # first {...} span before parsing.
            json_match = re.search(r'\{.*\}', response)
            if json_match:
                response = json_match.group()
            score = json.loads(response)
            if score.get("score") == "yes":
                print("~-grade: document relevant-~")
                filtered_docs.append(d)
            else:
                print("~-grade: document not relevant-~")
                search = "Yes"
        except Exception as e:
            print(f"Error grading document: {str(e)}")
            # On error, keep the document to be safe
            filtered_docs.append(d)
            continue
    return {"keys": {"documents": filtered_docs, "question": question, "run_web_search": search}}
def transform_query(state):
    """Transform the query to produce a better question.

    Rewrites the user's question into a search-optimized form before the
    web-search fallback runs. Documents pass through unchanged.
    """
    print("~-transform query-~")
    state_dict = state["keys"]
    question = state_dict["question"]
    documents = state_dict["documents"]
    # Create a prompt template
    prompt = PromptTemplate(
        template="""Generate a search-optimized version of this question by
analyzing its core semantic meaning and intent.
\n ------- \n
{question}
\n ------- \n
Return only the improved question with no additional text:""",
        input_variables=["question"],
    )
    # Deterministic rewrite (temperature=0) with Claude.
    llm = ChatAnthropic(
        model="claude-sonnet-4-5",
        anthropic_api_key=st.session_state.anthropic_api_key,
        temperature=0,
        max_tokens=1000
    )
    # Prompt -> model -> plain string
    chain = prompt | llm | StrOutputParser()
    better_question = chain.invoke({"question": question})
    return {
        "keys": {"documents": documents, "question": better_question}
    }
def decide_to_generate(state):
    """Route to web search when any retrieved document was graded irrelevant,
    otherwise go straight to answer generation."""
    print("~-decide to generate-~")
    needs_search = state["keys"]["run_web_search"]
    if needs_search == "Yes":
        print("~-decision: transform query and run web search-~")
        return "transform_query"
    print("~-decision: generate-~")
    return "generate"
def format_document(doc: Document) -> str:
    """Render a short, human-readable summary of a document for UI display.

    Only the first 200 characters of the content are shown.
    """
    return f"""
Source: {doc.metadata.get('source', 'Unknown')}
Title: {doc.metadata.get('title', 'No title')}
Content: {doc.page_content[:200]}...
"""
def format_state(state: dict) -> dict:
    """Return a copy of *state* with documents replaced by display summaries.

    Fix: the return annotation previously said ``str`` although the
    function returns a dict.

    Args:
        state: A node's "keys" dict; any "documents" value is mapped
            through format_document, everything else is passed through.

    Returns:
        A new dict with the same keys as *state*.
    """
    formatted = {}
    for key, value in state.items():
        if key == "documents":
            formatted[key] = [format_document(doc) for doc in value]
        else:
            formatted[key] = value
    return formatted
# --- LangGraph wiring ------------------------------------------------------
workflow = StateGraph(GraphState)
# Define the graph nodes
workflow.add_node("retrieve", retrieve)
workflow.add_node("grade_documents", grade_documents)
workflow.add_node("generate", generate)
workflow.add_node("transform_query", transform_query)
workflow.add_node("web_search", web_search)
# Build graph: retrieve -> grade; then either answer directly, or rewrite
# the query, run a web search, and answer from the augmented context.
workflow.set_entry_point("retrieve")
workflow.add_edge("retrieve", "grade_documents")
workflow.add_conditional_edges(
    "grade_documents",
    decide_to_generate,
    {
        "transform_query": "transform_query",
        "generate": "generate",
    },
)
workflow.add_edge("transform_query", "web_search")
workflow.add_edge("web_search", "generate")
workflow.add_edge("generate", END)
app = workflow.compile()
st.title("🔄 Corrective RAG Agent")
st.text("A possible query: What are the experiment results and ablation studies in this research paper?")
# User input
user_question = st.text_input("Please enter your question:")
if user_question:
    inputs = {
        "keys": {
            "question": user_question,
        }
    }
    # Stream each node's output and show the intermediate state per step.
    for output in app.stream(inputs):
        for key, value in output.items():
            with st.expander(f"Step '{key}':"):
                st.text(pprint.pformat(format_state(value["keys"]), indent=2, width=80))
    # NOTE(review): relies on `value` still holding the last streamed
    # state; raises NameError if the stream yields nothing — confirm.
    final_generation = value['keys'].get('generation', 'No final generation produced.')
    st.subheader("Final Generation:")
    st.write(final_generation)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/voice_ai_agents/customer_support_voice_agent/customer_support_voice_agent.py | voice_ai_agents/customer_support_voice_agent/customer_support_voice_agent.py | from typing import List, Dict, Optional
from pathlib import Path
import os
from firecrawl import FirecrawlApp
from qdrant_client import QdrantClient
from qdrant_client.http import models
from qdrant_client.http.models import Distance, VectorParams
from fastembed import TextEmbedding
from agents import Agent, Runner
from openai import AsyncOpenAI
import tempfile
import uuid
from datetime import datetime
import time
import streamlit as st
from dotenv import load_dotenv
import asyncio
load_dotenv()
def init_session_state():
    """Seed st.session_state with defaults for any keys not yet present."""
    defaults = {
        "initialized": False,
        "qdrant_url": "",
        "qdrant_api_key": "",
        "firecrawl_api_key": "",
        "openai_api_key": "",
        "doc_url": "",
        "setup_complete": False,
        "client": None,
        "embedding_model": None,
        "processor_agent": None,
        "tts_agent": None,
        "selected_voice": "coral"
    }
    for name, default in defaults.items():
        if name in st.session_state:
            continue
        st.session_state[name] = default
def sidebar_config():
    """Render the sidebar: API credentials, documentation URL, voice choice,
    and the "Initialize System" button that runs the full setup pipeline
    (Qdrant -> crawl -> embed/index -> agents)."""
    with st.sidebar:
        st.title("🔑 Configuration")
        st.markdown("---")
        # Credentials are masked and round-trip through session_state.
        st.session_state.qdrant_url = st.text_input(
            "Qdrant URL",
            value=st.session_state.qdrant_url,
            type="password"
        )
        st.session_state.qdrant_api_key = st.text_input(
            "Qdrant API Key",
            value=st.session_state.qdrant_api_key,
            type="password"
        )
        st.session_state.firecrawl_api_key = st.text_input(
            "Firecrawl API Key",
            value=st.session_state.firecrawl_api_key,
            type="password"
        )
        st.session_state.openai_api_key = st.text_input(
            "OpenAI API Key",
            value=st.session_state.openai_api_key,
            type="password"
        )
        st.markdown("---")
        st.session_state.doc_url = st.text_input(
            "Documentation URL",
            value=st.session_state.doc_url,
            placeholder="https://docs.example.com"
        )
        st.markdown("---")
        st.markdown("### 🎤 Voice Settings")
        voices = ["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
        st.session_state.selected_voice = st.selectbox(
            "Select Voice",
            options=voices,
            index=voices.index(st.session_state.selected_voice),
            help="Choose the voice for the audio response"
        )
        if st.button("Initialize System", type="primary"):
            # All fields are required before setup can run.
            if all([
                st.session_state.qdrant_url,
                st.session_state.qdrant_api_key,
                st.session_state.firecrawl_api_key,
                st.session_state.openai_api_key,
                st.session_state.doc_url
            ]):
                progress_placeholder = st.empty()
                with progress_placeholder.container():
                    try:
                        # 1) Vector store + embedding model
                        st.markdown("🔄 Setting up Qdrant connection...")
                        client, embedding_model = setup_qdrant_collection(
                            st.session_state.qdrant_url,
                            st.session_state.qdrant_api_key
                        )
                        st.session_state.client = client
                        st.session_state.embedding_model = embedding_model
                        st.markdown("✅ Qdrant setup complete!")
                        # 2) Crawl the documentation site
                        st.markdown("🔄 Crawling documentation pages...")
                        pages = crawl_documentation(
                            st.session_state.firecrawl_api_key,
                            st.session_state.doc_url
                        )
                        st.markdown(f"✅ Crawled {len(pages)} documentation pages!")
                        # 3) Embed and index the crawled pages
                        store_embeddings(
                            client,
                            embedding_model,
                            pages,
                            "docs_embeddings"
                        )
                        # 4) Build the answering + TTS agents
                        processor_agent, tts_agent = setup_agents(
                            st.session_state.openai_api_key
                        )
                        st.session_state.processor_agent = processor_agent
                        st.session_state.tts_agent = tts_agent
                        st.session_state.setup_complete = True
                        st.success("✅ System initialized successfully!")
                    except Exception as e:
                        st.error(f"Error during setup: {str(e)}")
            else:
                st.error("Please fill in all the required fields!")
def setup_qdrant_collection(qdrant_url: str, qdrant_api_key: str, collection_name: str = "docs_embeddings"):
    """Connect to Qdrant and make sure the target collection exists.

    The embedding dimension is probed by embedding a dummy string so the
    collection matches whatever model TextEmbedding loads.

    Returns:
        Tuple of (QdrantClient, TextEmbedding).
    """
    client = QdrantClient(url=qdrant_url, api_key=qdrant_api_key)
    embedder = TextEmbedding()
    probe = next(iter(embedder.embed(["test"])))
    dim = len(probe)
    try:
        client.create_collection(
            collection_name=collection_name,
            vectors_config=VectorParams(size=dim, distance=Distance.COSINE)
        )
    except Exception as err:
        # An existing collection is fine; anything else propagates.
        if "already exists" not in str(err):
            raise
    return client, embedder
def crawl_documentation(firecrawl_api_key: str, url: str, output_dir: Optional[str] = None):
    """Crawl a documentation site via Firecrawl and collect page contents.

    Args:
        firecrawl_api_key: Firecrawl API key.
        url: Root URL to crawl (limit of 5 pages per request).
        output_dir: Optional directory; when set, each page's content is
            also written there as a uniquely named .md file.

    Returns:
        List of dicts with "content", "url" and "metadata" per page.
    """
    firecrawl = FirecrawlApp(api_key=firecrawl_api_key)
    pages = []
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    response = firecrawl.crawl_url(
        url,
        params={
            'limit': 5,
            'scrapeOptions': {
                'formats': ['markdown', 'html']
            }
        }
    )
    # Follow pagination until the API stops returning a 'next' URL.
    while True:
        for page in response.get('data', []):
            # Prefer markdown; fall back to raw HTML.
            content = page.get('markdown') or page.get('html', '')
            metadata = page.get('metadata', {})
            source_url = metadata.get('sourceURL', '')
            if output_dir and content:
                filename = f"{uuid.uuid4()}.md"
                filepath = os.path.join(output_dir, filename)
                with open(filepath, 'w', encoding='utf-8') as f:
                    f.write(content)
            pages.append({
                "content": content,
                "url": source_url,
                "metadata": {
                    "title": metadata.get('title', ''),
                    "description": metadata.get('description', ''),
                    "language": metadata.get('language', 'en'),
                    "crawl_date": datetime.now().isoformat()
                }
            })
        next_url = response.get('next')
        if not next_url:
            break
        # NOTE(review): assumes FirecrawlApp exposes a .get(next_url)
        # helper returning the next crawl page — confirm against the
        # installed firecrawl SDK version.
        response = firecrawl.get(next_url)
        time.sleep(1)  # be gentle with the API between pages
    return pages
def store_embeddings(client: QdrantClient, embedding_model: TextEmbedding, pages: List[Dict], collection_name: str):
    """Embed each crawled page and upsert it into the Qdrant collection.

    Each page becomes one point with a random UUID id and a payload
    carrying the content, source URL, and flattened metadata.
    """
    for entry in pages:
        vector = next(iter(embedding_model.embed([entry["content"]])))
        point = models.PointStruct(
            id=str(uuid.uuid4()),
            vector=vector.tolist(),
            payload={
                "content": entry["content"],
                "url": entry["url"],
                **entry["metadata"]
            }
        )
        client.upsert(collection_name=collection_name, points=[point])
def setup_agents(openai_api_key: str):
    """Create the documentation-answering agent and the TTS-styling agent.

    Side effect: exports OPENAI_API_KEY into the process environment so
    the agents SDK picks it up.

    Returns:
        Tuple of (processor_agent, tts_agent).
    """
    os.environ["OPENAI_API_KEY"] = openai_api_key
    processor_agent = Agent(
        name="Documentation Processor",
        instructions="""You are a helpful documentation assistant. Your task is to:
1. Analyze the provided documentation content
2. Answer the user's question clearly and concisely
3. Include relevant examples when available
4. Cite the source URLs when referencing specific content
5. Keep responses natural and conversational
6. Format your response in a way that's easy to speak out loud""",
        model="gpt-4o"
    )
    tts_agent = Agent(
        name="Text-to-Speech Agent",
        # NOTE(review): "gpt-4o-mini-tts" is a speech-synthesis model;
        # confirm the agents SDK accepts it for a text agent here.
        instructions="""You are a text-to-speech agent. Your task is to:
1. Convert the processed documentation response into natural speech
2. Maintain proper pacing and emphasis
3. Handle technical terms clearly
4. Keep the tone professional but friendly
5. Use appropriate pauses for better comprehension
6. Ensure the speech is clear and well-articulated""",
        model="gpt-4o-mini-tts"
    )
    return processor_agent, tts_agent
async def process_query(
    query: str,
    client: QdrantClient,
    embedding_model: TextEmbedding,
    processor_agent: Agent,
    tts_agent: Agent,
    collection_name: str,
    openai_api_key: str
):
    """Answer *query* from the indexed docs and synthesize an audio reply.

    Pipeline: embed query -> top-3 vector search -> build context ->
    processor agent writes the text answer -> TTS agent writes delivery
    instructions -> OpenAI TTS renders an mp3 into a temp file.

    Returns:
        Dict with status "success" (text, audio path, sources, query
        details) or "error" (message plus the original query). Errors are
        returned as data rather than raised so the UI can render them.
    """
    try:
        query_embedding = list(embedding_model.embed([query]))[0]
        search_response = client.query_points(
            collection_name=collection_name,
            query=query_embedding.tolist(),
            limit=3,
            with_payload=True
        )
        # Newer qdrant-client returns an object exposing .points.
        search_results = search_response.points if hasattr(search_response, 'points') else []
        if not search_results:
            raise Exception("No relevant documents found in the vector database")
        # Assemble the retrieval context the processor agent answers from.
        context = "Based on the following documentation:\n\n"
        for result in search_results:
            payload = result.payload
            if not payload:
                continue
            url = payload.get('url', 'Unknown URL')
            content = payload.get('content', '')
            context += f"From {url}:\n{content}\n\n"
        context += f"\nUser Question: {query}\n\n"
        context += "Please provide a clear, concise answer that can be easily spoken out loud."
        # Text answer first, then delivery instructions for the voice.
        processor_result = await Runner.run(processor_agent, context)
        processor_response = processor_result.final_output
        tts_result = await Runner.run(tts_agent, processor_response)
        tts_response = tts_result.final_output
        async_openai = AsyncOpenAI(api_key=openai_api_key)
        audio_response = await async_openai.audio.speech.create(
            model="gpt-4o-mini-tts",
            voice=st.session_state.selected_voice,
            input=processor_response,
            instructions=tts_response,
            response_format="mp3"
        )
        # Persist the mp3 so the UI can play it and offer a download.
        temp_dir = tempfile.gettempdir()
        audio_path = os.path.join(temp_dir, f"response_{uuid.uuid4()}.mp3")
        with open(audio_path, "wb") as f:
            f.write(audio_response.content)
        return {
            "status": "success",
            "text_response": processor_response,
            "tts_instructions": tts_response,
            "audio_path": audio_path,
            "sources": [r.payload.get("url", "Unknown URL") for r in search_results if r.payload],
            "query_details": {
                "vector_size": len(query_embedding),
                "results_found": len(search_results),
                "collection_name": collection_name
            }
        }
    except Exception as e:
        return {
            "status": "error",
            "error": str(e),
            "query": query
        }
def run_streamlit():
    """Entry point: render the customer-support voice agent UI and drive
    the async query pipeline from Streamlit's synchronous context."""
    st.set_page_config(
        page_title="Customer Support Voice Agent",
        page_icon="🎙️",
        layout="wide"
    )
    init_session_state()
    sidebar_config()
    st.title("🎙️ Customer Support Voice Agent")
    st.markdown("""
Get OpenAI SDK voice-powered answers to your documentation questions! Simply:
1. Configure your API keys in the sidebar
2. Enter the documentation URL you want to learn about or have questions about
3. Ask your question below and get both text and voice responses
""")
    # Disabled until the sidebar's "Initialize System" completes.
    query = st.text_input(
        "What would you like to know about the documentation?",
        placeholder="e.g., How do I authenticate API requests?",
        disabled=not st.session_state.setup_complete
    )
    if query and st.session_state.setup_complete:
        with st.status("Processing your query...", expanded=True) as status:
            try:
                st.markdown("🔄 Searching documentation and generating response...")
                result = asyncio.run(process_query(
                    query,
                    st.session_state.client,
                    st.session_state.embedding_model,
                    st.session_state.processor_agent,
                    st.session_state.tts_agent,
                    "docs_embeddings",
                    st.session_state.openai_api_key
                ))
                if result["status"] == "success":
                    status.update(label="✅ Query processed!", state="complete")
                    st.markdown("### Response:")
                    st.write(result["text_response"])
                    if "audio_path" in result:
                        st.markdown(f"### 🔊 Audio Response (Voice: {st.session_state.selected_voice})")
                        st.audio(result["audio_path"], format="audio/mp3", start_time=0)
                        # Offer the rendered mp3 for download as well.
                        with open(result["audio_path"], "rb") as audio_file:
                            audio_bytes = audio_file.read()
                        st.download_button(
                            label="📥 Download Audio Response",
                            data=audio_bytes,
                            file_name=f"voice_response_{st.session_state.selected_voice}.mp3",
                            mime="audio/mp3"
                        )
                    st.markdown("### Sources:")
                    for source in result["sources"]:
                        st.markdown(f"- {source}")
                else:
                    status.update(label="❌ Error processing query", state="error")
                    st.error(f"Error: {result.get('error', 'Unknown error occurred')}")
            except Exception as e:
                status.update(label="❌ Error processing query", state="error")
                st.error(f"Error processing query: {str(e)}")
    elif not st.session_state.setup_complete:
        st.info("👈 Please configure the system using the sidebar first!")

if __name__ == "__main__":
    run_streamlit()
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/voice_ai_agents/voice_rag_openaisdk/rag_voice.py | voice_ai_agents/voice_rag_openaisdk/rag_voice.py | from typing import List, Dict, Optional, Tuple
import os
import tempfile
from datetime import datetime
import uuid
import asyncio
import streamlit as st
from dotenv import load_dotenv
from qdrant_client import QdrantClient
from qdrant_client.http import models
from qdrant_client.http.models import Distance, VectorParams
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from fastembed import TextEmbedding
from openai import AsyncOpenAI
from openai.helpers import LocalAudioPlayer
from agents import Agent, Runner
load_dotenv()
# Constants
COLLECTION_NAME = "voice-rag-agent"  # Qdrant collection holding all PDF chunk embeddings
def init_session_state() -> None:
    """Ensure every session key this app relies on exists, seeding defaults."""
    defaults = {
        "initialized": False,
        "qdrant_url": "",
        "qdrant_api_key": "",
        "openai_api_key": "",
        "setup_complete": False,
        "client": None,
        "embedding_model": None,
        "processor_agent": None,
        "tts_agent": None,
        "selected_voice": "coral",
        "processed_documents": []
    }
    missing = [name for name in defaults if name not in st.session_state]
    for name in missing:
        st.session_state[name] = defaults[name]
def setup_sidebar() -> None:
    """Configure sidebar with API settings and voice options.

    Credentials are rendered as masked inputs and round-trip through
    st.session_state on every rerun.
    """
    with st.sidebar:
        st.title("🔑 Configuration")
        st.markdown("---")
        st.session_state.qdrant_url = st.text_input(
            "Qdrant URL",
            value=st.session_state.qdrant_url,
            type="password"
        )
        st.session_state.qdrant_api_key = st.text_input(
            "Qdrant API Key",
            value=st.session_state.qdrant_api_key,
            type="password"
        )
        st.session_state.openai_api_key = st.text_input(
            "OpenAI API Key",
            value=st.session_state.openai_api_key,
            type="password"
        )
        st.markdown("---")
        st.markdown("### 🎤 Voice Settings")
        voices = ["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
        st.session_state.selected_voice = st.selectbox(
            "Select Voice",
            options=voices,
            index=voices.index(st.session_state.selected_voice),
            help="Choose the voice for the audio response"
        )
def setup_qdrant() -> Tuple[QdrantClient, TextEmbedding]:
    """Connect to Qdrant using session credentials and ensure the collection exists.

    The embedding dimension is probed by embedding a dummy string so the
    collection matches whatever model TextEmbedding loads.

    Raises:
        ValueError: if the Qdrant URL or API key is missing.
    """
    creds = (st.session_state.qdrant_url, st.session_state.qdrant_api_key)
    if not all(creds):
        raise ValueError("Qdrant credentials not provided")
    client = QdrantClient(url=creds[0], api_key=creds[1])
    embedder = TextEmbedding()
    dim = len(next(iter(embedder.embed(["test"]))))
    try:
        client.create_collection(
            collection_name=COLLECTION_NAME,
            vectors_config=VectorParams(
                size=dim,
                distance=Distance.COSINE
            )
        )
    except Exception as err:
        # An existing collection is fine; anything else propagates.
        if "already exists" not in str(err):
            raise
    return client, embedder
def process_pdf(file) -> List:
    """Process an uploaded PDF: load, tag with metadata, split into chunks.

    Args:
        file: A Streamlit UploadedFile containing the PDF bytes.

    Returns:
        List of chunked documents, or an empty list on failure (the error
        is shown in the UI instead of raised).
    """
    tmp_path = None
    try:
        # PyPDFLoader needs a real path, so spill the upload to disk first.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
            tmp_file.write(file.getvalue())
            tmp_path = tmp_file.name
        loader = PyPDFLoader(tmp_path)
        documents = loader.load()
        # Add source metadata
        for doc in documents:
            doc.metadata.update({
                "source_type": "pdf",
                "file_name": file.name,
                "timestamp": datetime.now().isoformat()
            })
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        return text_splitter.split_documents(documents)
    except Exception as e:
        st.error(f"📄 PDF processing error: {str(e)}")
        return []
    finally:
        # Fix: the temp file (delete=False) was never removed, leaking one
        # file per upload; clean it up regardless of success.
        if tmp_path is not None:
            try:
                os.remove(tmp_path)
            except OSError:
                pass
def store_embeddings(
    client: QdrantClient,
    embedding_model: TextEmbedding,
    documents: List,
    collection_name: str
) -> None:
    """Embed each document chunk and upsert it into Qdrant.

    Every chunk becomes one point with a random UUID id; the payload
    carries the chunk text plus its flattened metadata.
    """
    for chunk in documents:
        vector = next(iter(embedding_model.embed([chunk.page_content])))
        point = models.PointStruct(
            id=str(uuid.uuid4()),
            vector=vector.tolist(),
            payload={
                "content": chunk.page_content,
                **chunk.metadata
            }
        )
        client.upsert(collection_name=collection_name, points=[point])
def setup_agents(openai_api_key: str) -> Tuple[Agent, Agent]:
    """Initialize the processor and TTS agents.

    Side effect: exports OPENAI_API_KEY into the process environment so
    the agents SDK picks it up.

    Returns:
        Tuple of (processor_agent, tts_agent), both backed by gpt-4o.
    """
    os.environ["OPENAI_API_KEY"] = openai_api_key
    processor_agent = Agent(
        name="Documentation Processor",
        instructions="""You are a helpful documentation assistant. Your task is to:
1. Analyze the provided documentation content
2. Answer the user's question clearly and concisely
3. Include relevant examples when available
4. Cite the source files when referencing specific content
5. Keep responses natural and conversational
6. Format your response in a way that's easy to speak out loud""",
        model="gpt-4o"
    )
    tts_agent = Agent(
        name="Text-to-Speech Agent",
        instructions="""You are a text-to-speech agent. Your task is to:
1. Convert the processed documentation response into natural speech
2. Maintain proper pacing and emphasis
3. Handle technical terms clearly
4. Keep the tone professional but friendly
5. Use appropriate pauses for better comprehension
6. Ensure the speech is clear and well-articulated""",
        model="gpt-4o"
    )
    return processor_agent, tts_agent
async def process_query(
    query: str,
    client: QdrantClient,
    embedding_model: TextEmbedding,
    collection_name: str,
    openai_api_key: str,
    voice: str
) -> Dict:
    """Process user query and generate voice response.

    Pipeline: embed query -> top-3 vector search -> build context ->
    processor agent answers -> TTS agent writes delivery instructions ->
    stream PCM audio straight to the speakers, then render a downloadable
    mp3. Progress is reported directly into the Streamlit UI.

    Returns:
        Dict with status "success" (text, instructions, mp3 path, sources)
        or "error" (message plus the original query).
    """
    try:
        st.info("🔄 Step 1: Generating query embedding and searching documents...")
        # Get query embedding and search
        query_embedding = list(embedding_model.embed([query]))[0]
        st.write(f"Generated embedding of size: {len(query_embedding)}")
        search_response = client.query_points(
            collection_name=collection_name,
            query=query_embedding.tolist(),
            limit=3,
            with_payload=True
        )
        # Newer qdrant-client returns an object exposing .points.
        search_results = search_response.points if hasattr(search_response, 'points') else []
        st.write(f"Found {len(search_results)} relevant documents")
        if not search_results:
            raise Exception("No relevant documents found in the vector database")
        st.info("🔄 Step 2: Preparing context from search results...")
        # Prepare context from search results
        context = "Based on the following documentation:\n\n"
        for i, result in enumerate(search_results, 1):
            payload = result.payload
            if not payload:
                continue
            content = payload.get('content', '')
            source = payload.get('file_name', 'Unknown Source')
            context += f"From {source}:\n{content}\n\n"
            st.write(f"Document {i} from: {source}")
        context += f"\nUser Question: {query}\n\n"
        context += "Please provide a clear, concise answer that can be easily spoken out loud."
        st.info("🔄 Step 3: Setting up agents...")
        # Setup agents if not already done (cached in session state)
        if not st.session_state.processor_agent or not st.session_state.tts_agent:
            processor_agent, tts_agent = setup_agents(openai_api_key)
            st.session_state.processor_agent = processor_agent
            st.session_state.tts_agent = tts_agent
            st.write("Initialized new processor and TTS agents")
        else:
            st.write("Using existing agents")
        st.info("🔄 Step 4: Generating text response...")
        # Generate text response using processor agent
        processor_result = await Runner.run(st.session_state.processor_agent, context)
        text_response = processor_result.final_output
        st.write(f"Generated text response of length: {len(text_response)}")
        st.info("🔄 Step 5: Generating voice instructions...")
        # Generate voice instructions using TTS agent
        tts_result = await Runner.run(st.session_state.tts_agent, text_response)
        voice_instructions = tts_result.final_output
        st.write(f"Generated voice instructions of length: {len(voice_instructions)}")
        st.info("🔄 Step 6: Generating and playing audio...")
        # Generate and play audio with streaming (PCM straight to speakers)
        async_openai = AsyncOpenAI(api_key=openai_api_key)
        async with async_openai.audio.speech.with_streaming_response.create(
            model="gpt-4o-mini-tts",
            voice=voice,
            input=text_response,
            instructions=voice_instructions,
            response_format="pcm",
        ) as stream_response:
            st.write("Starting audio playback...")
            # Play audio directly; blocks until playback finishes.
            await LocalAudioPlayer().play(stream_response)
            st.write("Audio playback complete")
        st.write("Generating downloadable MP3 version...")
        # Also save as MP3 for download (second, non-streaming TTS call)
        audio_response = await async_openai.audio.speech.create(
            model="gpt-4o-mini-tts",
            voice=voice,
            input=text_response,
            instructions=voice_instructions,
            response_format="mp3"
        )
        temp_dir = tempfile.gettempdir()
        audio_path = os.path.join(temp_dir, f"response_{uuid.uuid4()}.mp3")
        with open(audio_path, "wb") as f:
            f.write(audio_response.content)
        st.write(f"Saved MP3 file to: {audio_path}")
        st.success("✅ Query processing complete!")
        return {
            "status": "success",
            "text_response": text_response,
            "voice_instructions": voice_instructions,
            "audio_path": audio_path,
            "sources": [r.payload.get('file_name', 'Unknown Source') for r in search_results if r.payload]
        }
    except Exception as e:
        st.error(f"❌ Error during query processing: {str(e)}")
        return {
            "status": "error",
            "error": str(e),
            "query": query
        }
def main() -> None:
    """Main application function: page setup, PDF ingestion, query loop."""
    st.set_page_config(
        page_title="Voice RAG Agent",
        page_icon="🎙️",
        layout="wide"
    )
    init_session_state()
    setup_sidebar()
    st.title("🎙️ Voice RAG Agent")
    st.info("Get voice-powered answers to your documentation questions by configuring your API keys and uploading PDF documents. Then, simply ask questions to receive both text and voice responses!")
    # File upload section
    uploaded_file = st.file_uploader("Upload PDF", type=["pdf"])
    if uploaded_file:
        file_name = uploaded_file.name
        # Skip files already ingested this session (tracked by name only).
        if file_name not in st.session_state.processed_documents:
            with st.spinner('Processing PDF...'):
                try:
                    # Setup Qdrant lazily on the first upload
                    if not st.session_state.client:
                        client, embedding_model = setup_qdrant()
                        st.session_state.client = client
                        st.session_state.embedding_model = embedding_model
                    # Process and store document
                    documents = process_pdf(uploaded_file)
                    if documents:
                        store_embeddings(
                            st.session_state.client,
                            st.session_state.embedding_model,
                            documents,
                            COLLECTION_NAME
                        )
                        st.session_state.processed_documents.append(file_name)
                        st.success(f"✅ Added PDF: {file_name}")
                        st.session_state.setup_complete = True
                except Exception as e:
                    st.error(f"Error processing document: {str(e)}")
    # Display processed documents
    if st.session_state.processed_documents:
        st.sidebar.header("📚 Processed Documents")
        for doc in st.session_state.processed_documents:
            st.sidebar.text(f"📄 {doc}")
    # Query interface (disabled until at least one PDF is indexed)
    query = st.text_input(
        "What would you like to know about the documentation?",
        placeholder="e.g., How do I authenticate API requests?",
        disabled=not st.session_state.setup_complete
    )
    if query and st.session_state.setup_complete:
        with st.status("Processing your query...", expanded=True) as status:
            try:
                # Drive the async pipeline from Streamlit's sync context.
                result = asyncio.run(process_query(
                    query,
                    st.session_state.client,
                    st.session_state.embedding_model,
                    COLLECTION_NAME,
                    st.session_state.openai_api_key,
                    st.session_state.selected_voice
                ))
                if result["status"] == "success":
                    status.update(label="✅ Query processed!", state="complete")
                    st.markdown("### Response:")
                    st.write(result["text_response"])
                    if "audio_path" in result:
                        st.markdown(f"### 🔊 Audio Response (Voice: {st.session_state.selected_voice})")
                        st.audio(result["audio_path"], format="audio/mp3", start_time=0)
                        # Offer the rendered mp3 for download as well.
                        with open(result["audio_path"], "rb") as audio_file:
                            audio_bytes = audio_file.read()
                        st.download_button(
                            label="📥 Download Audio Response",
                            data=audio_bytes,
                            file_name=f"voice_response_{st.session_state.selected_voice}.mp3",
                            mime="audio/mp3"
                        )
                    st.markdown("### Sources:")
                    for source in result["sources"]:
                        st.markdown(f"- {source}")
                else:
                    status.update(label="❌ Error processing query", state="error")
                    st.error(f"Error: {result.get('error', 'Unknown error occurred')}")
            except Exception as e:
                status.update(label="❌ Error processing query", state="error")
                st.error(f"Error processing query: {str(e)}")
    elif not st.session_state.setup_complete:
        st.info("👈 Please configure the system and upload documents first!")

if __name__ == "__main__":
    main()
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/voice_ai_agents/ai_audio_tour_agent/printer.py | voice_ai_agents/ai_audio_tour_agent/printer.py | from typing import Any
from rich.console import Console, Group
from rich.live import Live
from rich.spinner import Spinner
class Printer:
    """
    Simple wrapper around rich.Live that streams status updates: each item
    renders with a spinner while in progress and a checkmark once done.
    """
    def __init__(self, console: Console) -> None:
        self.live = Live(console=console)
        # item_id -> (display text, is_done)
        self.items: dict[str, tuple[str, bool]] = {}
        # item ids whose completed state should render without the checkmark
        self.hide_done_ids: set[str] = set()
        self.live.start()

    def end(self) -> None:
        """Stop the live display."""
        self.live.stop()

    def hide_done_checkmark(self, item_id: str) -> None:
        """Render *item_id* without a checkmark once it completes."""
        self.hide_done_ids.add(item_id)

    def update_item(
        self, item_id: str, content: str, is_done: bool = False, hide_checkmark: bool = False
    ) -> None:
        """Create or update an item's text and completion state, then redraw."""
        self.items[item_id] = (content, is_done)
        if hide_checkmark:
            self.hide_done_ids.add(item_id)
        self.flush()

    def mark_item_done(self, item_id: str) -> None:
        """Flip an existing item to done, keeping its current text."""
        self.items[item_id] = (self.items[item_id][0], True)
        self.flush()

    def flush(self) -> None:
        """Re-render all items: spinner while pending, checkmark when done."""
        renderables: list[Any] = []
        for item_id, (content, is_done) in self.items.items():
            if is_done:
                prefix = "✅ " if item_id not in self.hide_done_ids else ""
                renderables.append(prefix + content)
            else:
                renderables.append(Spinner("dots", text=content))
        self.live.update(Group(*renderables))
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/voice_ai_agents/ai_audio_tour_agent/ai_audio_tour_agent.py | voice_ai_agents/ai_audio_tour_agent/ai_audio_tour_agent.py | import streamlit as st
import asyncio
from manager import TourManager
from agents import set_default_openai_key
import json
def tts(text):
    """Convert tour text to speech; return the path of the generated MP3.

    Uses OpenAI's TTS model with a warm "tour guide" delivery style. The
    OpenAI client reads OPENAI_API_KEY from the environment.
    """
    from pathlib import Path
    from openai import OpenAI
    client = OpenAI()
    # Fixed output name next to this script; overwritten on every call.
    # (Was an f-string with no placeholders — plain literal is correct.)
    speech_file_path = Path(__file__).parent / "speech_tour.mp3"
    response = client.audio.speech.create(
        model="gpt-4o-mini-tts",
        voice="nova",
        input=text,
        instructions="""You are a friendly and engaging tour guide. Speak naturally and conversationally, as if you're walking alongside the visitor.
Use a warm, inviting tone throughout. Avoid robotic or formal language. Make the tour feel like a casual conversation with a knowledgeable friend.
Use natural transitions between topics and maintain an enthusiastic but relaxed pace."""
    )
    # NOTE(review): stream_to_file is deprecated in recent openai SDKs;
    # consider client.audio.speech.with_streaming_response — confirm SDK version.
    response.stream_to_file(speech_file_path)
    return speech_file_path
def run_async(func, *args, **kwargs):
    """Run the coroutine function *func* to completion and return its result.

    Prefers asyncio.run(); if that raises RuntimeError (an event loop is
    already set up for this thread), falls back to driving a dedicated,
    explicitly managed loop.
    """
    try:
        return asyncio.run(func(*args, **kwargs))
    except RuntimeError:
        # asyncio.get_event_loop() is deprecated when no loop is running,
        # so create and clean up a fresh loop explicitly instead.
        loop = asyncio.new_event_loop()
        try:
            asyncio.set_event_loop(loop)
            return loop.run_until_complete(func(*args, **kwargs))
        finally:
            asyncio.set_event_loop(None)
            loop.close()
# Set page config for a better UI
st.set_page_config(
    page_title="AI Audio Tour Agent",
    page_icon="🎧",
    layout="wide",
    initial_sidebar_state="collapsed"
)
# Sidebar for API key
with st.sidebar:
    st.title("🔑 Settings")
    api_key = st.text_input("OpenAI API Key:", type="password")
    if api_key:
        # Kept in session_state so the Generate button can verify a key exists.
        st.session_state["OPENAI_API_KEY"] = api_key
        st.success("API key saved!")
        set_default_openai_key(api_key)
# Main content
st.title("🎧 AI Audio Tour Agent")
st.markdown("""
<div class='welcome-card'>
<h3>Welcome to your personalized audio tour guide!</h3>
<p>I'll help you explore any location with an engaging, natural-sounding tour tailored to your interests.</p>
</div>
""", unsafe_allow_html=True)
# Create a clean layout with cards
col1, col2 = st.columns([2, 1])
with col1:
    st.markdown("### 📍 Where would you like to explore?")
    location = st.text_input("", placeholder="Enter a city, landmark, or location...")
    st.markdown("### 🎯 What interests you?")
    interests = st.multiselect(
        "",
        options=["History", "Architecture", "Culinary", "Culture"],
        default=["History", "Architecture"],
        help="Select the topics you'd like to learn about"
    )
with col2:
    st.markdown("### ⏱️ Tour Settings")
    # Duration is an int number of minutes; passed straight to TourManager.run.
    duration = st.slider(
        "Tour Duration (minutes)",
        min_value=5,
        max_value=60,
        value=10,
        step=5,
        help="Choose how long you'd like your tour to be"
    )
    st.markdown("### 🎙️ Voice Settings")
    voice_style = st.selectbox(
        "Guide's Voice Style",
        options=["Friendly & Casual", "Professional & Detailed", "Enthusiastic & Energetic"],
        help="Select the personality of your tour guide"
    )
    # NOTE(review): voice_style is collected but not passed to tts() below —
    # confirm whether it is intended to influence the generated audio.
# Generate Tour Button
if st.button("🎧 Generate Tour", type="primary"):
    if "OPENAI_API_KEY" not in st.session_state:
        st.error("Please enter your OpenAI API key in the sidebar.")
    elif not location:
        st.error("Please enter a location.")
    elif not interests:
        st.error("Please select at least one interest.")
    else:
        with st.spinner(f"Creating your personalized tour of {location}..."):
            mgr = TourManager()
            final_tour = run_async(
                mgr.run, location, interests, duration
            )
            # Display the tour content in an expandable section
            with st.expander("📝 Tour Content", expanded=True):
                st.markdown(final_tour)
            # Add a progress bar for audio generation
            with st.spinner("🎙️ Generating audio tour..."):
                progress_bar = st.progress(0)
                tour_audio = tts(final_tour)
                progress_bar.progress(100)
            # Display audio player with custom styling
            st.markdown("### 🎧 Listen to Your Tour")
            st.audio(tour_audio, format="audio/mp3")
            # Add download button for the audio
            with open(tour_audio, "rb") as file:
                st.download_button(
                    label="📥 Download Audio Tour",
                    data=file,
                    file_name=f"{location.lower().replace(' ', '_')}_tour.mp3",
                    mime="audio/mp3"
                )
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/voice_ai_agents/ai_audio_tour_agent/agent.py | voice_ai_agents/ai_audio_tour_agent/agent.py | from pydantic import BaseModel
from agents import Agent, WebSearchTool
from agents.model_settings import ModelSettings
# ---------------------------------------------------------------------------
# Architecture specialist: researches buildings/design for the tour location.
# ---------------------------------------------------------------------------
ARCHITECTURE_AGENT_INSTRUCTIONS = ("""
You are the Architecture agent for a self-guided audio tour system. Given a location and the areas of interest of user, your role is to:
1. Describe architectural styles, notable buildings, urban planning, and design elements
2. Provide technical insights balanced with accessible explanations
3. Highlight the most visually striking or historically significant structures
4. Adopt a detailed, descriptive voice style when delivering architectural content
5. Make sure not to add any headings like ## Architecture. Just provide the content
6. Make sure the details are conversational and don't include any formatting or headings. It will be directly used in a audio model for converting to speech and the entire content should feel like natural speech.
7. Make sure the content is strictly between the upper and lower Word Limit as specified. For example, If the word limit is 100 to 120, it should be within that, not less than 100 or greater than 120
NOTE: Given a location, use web search to retrieve up‑to‑date context and architectural information about the location
NOTE: Do not add any Links or Hyperlinks in your answer or never cite any source
Help users see and appreciate architectural details they might otherwise miss. Make it as detailed and elaborative as possible
""")
class Architecture(BaseModel):
    """Structured agent output: the spoken architecture narration."""
    output: str
# Web search is mandatory (tool_choice="required") so content stays current.
architecture_agent = Agent(
    name="ArchitectureAgent",
    instructions=ARCHITECTURE_AGENT_INSTRUCTIONS,
    model="gpt-4o-mini",
    tools=[WebSearchTool()],
    model_settings=ModelSettings(tool_choice="required"),
    output_type=Architecture
)
# ---------------------------------------------------------------------------
# Culinary specialist: researches local food culture for the tour location.
# ---------------------------------------------------------------------------
CULINARY_AGENT_INSTRUCTIONS = ("""
You are the Culinary agent for a self-guided audio tour system. Given a location and the areas of interest of user, your role is to:
1. Highlight local food specialties, restaurants, markets, and culinary traditions in the user's location
2. Explain the historical and cultural significance of local dishes and ingredients
3. Suggest food stops suitable for the tour duration
4. Adopt an enthusiastic, passionate voice style when delivering culinary content
5. Make sure not to add any headings like ## Culinary. Just provide the content
6. Make sure the details are conversational and don't include any formatting or headings. It will be directly used in a audio model for converting to speech and the entire content should feel like natural speech.
7. Make sure the content is strictly between the upper and lower Word Limit as specified. For example, If the word limit is 100 to 120, it should be within that, not less than 100 or greater than 120
NOTE: Given a location, use web search to retrieve up‑to‑date context and culinary information about the location
NOTE: Do not add any Links or Hyperlinks in your answer or never cite any source
Make your descriptions vivid and appetizing. Include practical information like operating hours when relevant. Make it as detailed and elaborative as possible
""")
class Culinary(BaseModel):
    """Structured agent output: the spoken culinary narration."""
    output: str
# Web search is mandatory (tool_choice="required") so content stays current.
culinary_agent = Agent(
    name="CulinaryAgent",
    instructions=CULINARY_AGENT_INSTRUCTIONS,
    model="gpt-4o-mini",
    tools=[WebSearchTool()],
    model_settings=ModelSettings(tool_choice="required"),
    output_type=Culinary
)
# ---------------------------------------------------------------------------
# Culture specialist: researches traditions/arts for the tour location.
# ---------------------------------------------------------------------------
CULTURE_AGENT_INSTRUCTIONS = ("""
You are the Culture agent for a self-guided audio tour system. Given a location and the areas of interest of user, your role is to:
1. Provide information about local traditions, customs, arts, music, and cultural practices
2. Highlight cultural venues and events relevant to the user's interests
3. Explain cultural nuances and significance that enhance the visitor's understanding
4. Adopt a warm, respectful voice style when delivering cultural content
5. Make sure not to add any headings like ## Culture. Just provide the content
6. Make sure the details are conversational and don't include any formatting or headings. It will be directly used in a audio model for converting to speech and the entire content should feel like natural speech.
7. Make sure the content is strictly between the upper and lower Word Limit as specified. For example, If the word limit is 100 to 120, it should be within that, not less than 100 or greater than 120
NOTE: Given a location, use web search to retrieve up‑to‑date context and all the cultural information about the location
NOTE: Do not add any Links or Hyperlinks in your answer or never cite any source
Focus on authentic cultural insights that help users appreciate local ways of life. Make it as detailed and elaborative as possible
""")
class Culture(BaseModel):
    """Structured agent output: the spoken culture narration."""
    output: str
# Web search is mandatory (tool_choice="required") so content stays current.
culture_agent = Agent(
    name="CulturalAgent",
    instructions=CULTURE_AGENT_INSTRUCTIONS,
    model="gpt-4o-mini",
    tools=[WebSearchTool()],
    model_settings=ModelSettings(tool_choice="required"),
    output_type=Culture
)
# ---------------------------------------------------------------------------
# History specialist: researches historical narratives for the tour location.
# ---------------------------------------------------------------------------
HISTORY_AGENT_INSTRUCTIONS = ("""
You are the History agent for a self-guided audio tour system. Given a location and the areas of interest of user, your role is to:
1. Provide historically accurate information about landmarks, events, and people related to the user's location
2. Prioritize the most significant historical aspects based on the user's time constraints
3. Include interesting historical facts and stories that aren't commonly known
4. Adopt an authoritative, professorial voice style when delivering historical content
5. Make sure not to add any headings like ## History. Just provide the content
6. Make sure the details are conversational and don't include any formatting or headings. It will be directly used in a audio model for converting to speech and the entire content should feel like natural speech.
7. Make sure the content is strictly between the upper and lower Word Limit as specified. For example, If the word limit is 100 to 120, it should be within that, not less than 100 or greater than 120
NOTE: Given a location, use web search to retrieve up‑to‑date context and historical information about the location
NOTE: Do not add any Links or Hyperlinks in your answer or never cite any source
Focus on making history come alive through engaging narratives. Keep descriptions concise but informative. Make it as detailed and elaborative as possible
""")
class History(BaseModel):
    """Structured agent output: the spoken history narration."""
    output: str
# Web search is mandatory (tool_choice="required") so content stays current.
historical_agent = Agent(
    name="HistoricalAgent",
    instructions=HISTORY_AGENT_INSTRUCTIONS,
    model="gpt-4o-mini",
    output_type=History,
    tools=[WebSearchTool()],
    model_settings=ModelSettings(tool_choice="required"),
)
# ---------------------------------------------------------------------------
# Orchestrator: merges the specialists' sections into one cohesive tour.
# ---------------------------------------------------------------------------
ORCHESTRATOR_INSTRUCTIONS = ("""
Your Role
You are the Orchestrator Agent for a self-guided audio tour system. Your task is to assemble a comprehensive and engaging tour for a single location by integrating pre-timed content from four specialist agents (Architecture, History, Culinary, and Culture), while adding introduction and conclusion elements.
Input Parameters
- User Location: The specific location for the tour (e.g., a landmark, neighborhood, or district)
- User Interests: User's preference across categories (Architecture, History, Culinary, Culture)
- Specialist Agent Outputs: Pre-sized content from each domain expert (Architecture, History, Culinary, Culture)
- Specialist Agent Word Limit: Word Limit from each domain expert (Architecture, History, Culinary, Culture)
Your Tasks
1. Introduction Creation (1-2 minutes)
Create an engaging and warm introduction that:
- Welcomes the user to the specific location
- Briefly outlines what the tour will cover
- Highlights which categories are emphasized based on user interests
- Sets the tone for the experience (conversational and immersive)
2. Content Integration with Deduplication
Integrate the content from all four agents in the correct order:
- Architecture → History → Culture → Culinary
- Maintain each agent's voice and expertise
- Ensure all content fits within its allocated time budget
- Don't edit anything from your end and just accumulate the content from the specialised agents
3. Transition Development
Develop smooth transitions between the sections:
- Use natural language to move from one domain to another
- Connect themes when possible (e.g., how architecture influenced culture, or how history shaped food)
4. Conclusion Creation
Write a thoughtful concise and short conclusion that:
- Summarizes key highlights from the tour
- Reinforces the uniqueness of the location
- Connects the explored themes holistically
- Encourages the listener to explore further based on their interests
5. Final Assembly
Assemble the complete tour in the following order:
- Introduction
- Architecture
- History
- Culture
- Culinary
- Conclusion
Ensure:
- Transitions are smooth
- Content is free from redundancy
- Total duration respects the time allocation plan
- The entire output sounds like one cohesive guided experience
""")
class FinalTour(BaseModel):
    """Structured output of the orchestrator: one field per tour section."""
    introduction: str
    """A short introduction of the Tour."""
    architecture: str
    """The Architectural Content"""
    history: str
    """The Historical Content"""
    culture: str
    """The Culture Content"""
    culinary: str
    """The Culinary Content"""
    conclusion: str
    """A short conclusion of the Tour."""
# No tools: the orchestrator only rearranges the specialists' text.
orchestrator_agent = Agent(
    name="OrchestratorAgent",
    instructions=ORCHESTRATOR_INSTRUCTIONS,
    model="gpt-4o-mini",
    output_type=FinalTour,
)
# ---------------------------------------------------------------------------
# Planner: allocates per-section minutes from interests + duration.
# ---------------------------------------------------------------------------
PLANNER_INSTRUCTIONS = ("""
Your Role
You are the Planner Agent for a self-guided tour system. Your primary responsibility is to analyze the user's location, interests, and requested tour duration to create an optimal time allocation plan for content generation by specialist agents (Architecture, History, Culture, and Culinary).
Input Parameters
User Location: The specific location for the tour
User Interests: User's ranked preferences across categories (Architecture, History, Culture, Culinary)
Tour Duration: User's selected time (15, 30, or 60 minutes)
Your Tasks
1. Interest Analysis
Evaluate the user's interest preferences
Assign weight to each category based on expressed interest level
If no specific preferences are provided, assume equal interest in all categories
2. Location Assessment
Analyze the significance of the specified location for each category
Determine if the location has stronger relevance in particular categories
Example: A cathedral might warrant more time for Architecture and History than Culinary
3. Time Allocation Calculation
Calculate the total content time (excluding introduction and conclusion)
Reserve 1-2 minutes for introduction and 1 minute for conclusion
Distribute the remaining time among the four categories based on:
User interest weights (primary factor)
Location relevance to each category (secondary factor)
Ensure minimum time thresholds for each category (even low-interest categories get some coverage)
4. Scaling for Different Durations
15-minute tour:
Introduction: ~1 minute
Content sections: ~12-13 minutes total (divided among categories)
Conclusion: ~1 minute
Each category gets at least 1 minute, with preferred categories getting more
30-minute tour:
Introduction: ~1.5 minutes
Content sections: ~27 minutes total (divided among categories)
Conclusion: ~1.5 minutes
Each category gets at least 3 minutes, with preferred categories getting more
60-minute tour:
Introduction: ~2 minutes
Content sections: ~56 minutes total (divided among categories)
Conclusion: ~2 minutes
Each category gets at least 5 minutes, with preferred categories getting more
Your output must be a JSON object with numeric time allocations (in minutes) for each section:
- introduction
- architecture
- history
- culture
- culinary
- conclusion
Only return the number of minutes allocated to each section. Do not include explanations or text descriptions.
Example:
{
"introduction": 2,
"architecture": 15,
"history": 20,
"culture": 10,
"culinary": 9,
"conclusion": 2
}
Make sure the time allocation adheres to the interests, and the interested section is allocated more time than others.
""")
class Planner(BaseModel):
    """Planner output: minutes allocated to each tour section."""
    introduction: float
    architecture: float
    history: float
    culture: float
    culinary: float
    conclusion: float
# Uses the larger gpt-4o model for the planning step.
planner_agent = Agent(
    name="PlannerAgent",
    instructions=PLANNER_INSTRUCTIONS,
    model="gpt-4o",
    output_type=Planner,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/voice_ai_agents/ai_audio_tour_agent/manager.py | voice_ai_agents/ai_audio_tour_agent/manager.py | from __future__ import annotations
import asyncio
import time
import json
from collections.abc import Sequence
from rich.console import Console
from agents import Runner, RunResult, custom_span, gen_trace_id, trace
from agent import History, historical_agent
from agent import Culinary,culinary_agent
from agent import Culture,culture_agent
from agent import Architecture,architecture_agent
from agent import Planner, planner_agent
from agent import FinalTour, orchestrator_agent
from printer import Printer
class TourManager:
    """
    Orchestrates the full tour-generation flow: plans time allocation,
    fans out to the specialist research agents for the user's selected
    interests, then assembles the final narrated tour.
    """
    def __init__(self) -> None:
        # Rich console + live status printer shared by every step below.
        self.console = Console()
        self.printer = Printer(self.console)
    async def run(self, query: str, interests: list, duration: int) -> str:
        """Research and assemble the tour; return the narration text.

        Args:
            query: Location to tour (city, landmark, ...).
            interests: Subset of {"Architecture", "History", "Culinary", "Culture"}.
            duration: Target tour length in minutes (the UI slider value).
        """
        trace_id = gen_trace_id()
        with trace("Tour Research trace", trace_id=trace_id):
            self.printer.update_item(
                "trace_id",
                "View trace: https://platform.openai.com/traces/{}".format(trace_id),
                is_done=True,
                hide_checkmark=True,
            )
            self.printer.update_item("start", "Starting tour research...", is_done=True)
            # Get plan based on selected interests
            # NOTE(review): the plan is computed but never consulted below —
            # section sizing comes from the words-per-minute math instead.
            # Confirm whether the planner output should drive word limits.
            planner = await self._get_plan(query, interests, duration)
            # Initialize research results
            research_results = {}
            # Calculate word limits based on duration
            # Assuming average speaking rate of 150 words per minute
            words_per_minute = 150
            total_words = int(duration) * words_per_minute
            words_per_section = total_words // len(interests)
            # Only research selected interests
            if "Architecture" in interests:
                research_results["architecture"] = await self._get_architecture(query, interests, words_per_section)
            if "History" in interests:
                research_results["history"] = await self._get_history(query, interests, words_per_section)
            if "Culinary" in interests:
                research_results["culinary"] = await self._get_culinary(query, interests, words_per_section)
            if "Culture" in interests:
                research_results["culture"] = await self._get_culture(query, interests, words_per_section)
            # Get final tour with only selected interests
            final_tour = await self._get_final_tour(
                query,
                interests,
                duration,
                research_results
            )
            self.printer.update_item("final_report", "", is_done=True)
            self.printer.end()
            # Build final tour content based on selected interests
            sections = []
            # Add selected interest sections without headers
            if "Architecture" in interests:
                sections.append(final_tour.architecture)
            if "History" in interests:
                sections.append(final_tour.history)
            if "Culture" in interests:
                sections.append(final_tour.culture)
            if "Culinary" in interests:
                sections.append(final_tour.culinary)
            # Format final tour with natural transitions
            final = ""
            for i, content in enumerate(sections):
                if i > 0:
                    final += "\n\n" # Add spacing between sections
                final += content
            return final
    async def _get_plan(self, query: str, interests: list, duration: int) -> Planner:
        """Ask the planner agent for a per-section time allocation."""
        self.printer.update_item("Planner", "Planning your personalized tour...")
        result = await Runner.run(
            planner_agent,
            "Query: {} Interests: {} Duration: {}".format(query, ', '.join(interests), duration)
        )
        self.printer.update_item(
            "Planner",
            "Completed planning",
            is_done=True,
        )
        return result.final_output_as(Planner)
    async def _get_history(self, query: str, interests: list, word_limit: int) -> History:
        """Run the history specialist; target length is word_limit..word_limit+20."""
        self.printer.update_item("History", "Researching historical highlights...")
        result = await Runner.run(
            historical_agent,
            "Query: {} Interests: {} Word Limit: {} - {}\n\nInstructions: Create engaging historical content for an audio tour. Focus on interesting stories and personal connections. Make it conversational and include specific details that would be interesting to hear while walking. Include specific locations and landmarks where possible. The content should be approximately {} words when spoken at a natural pace.".format(query, ', '.join(interests), word_limit, word_limit + 20, word_limit)
        )
        self.printer.update_item(
            "History",
            "Completed history research",
            is_done=True,
        )
        return result.final_output_as(History)
    async def _get_architecture(self, query: str, interests: list, word_limit: int) -> Architecture:
        """Run the architecture specialist; target length is word_limit..word_limit+20."""
        self.printer.update_item("Architecture", "Exploring architectural wonders...")
        result = await Runner.run(
            architecture_agent,
            "Query: {} Interests: {} Word Limit: {} - {}\n\nInstructions: Create engaging architectural content for an audio tour. Focus on visual descriptions and interesting design details. Make it conversational and include specific buildings and their unique features. Describe what visitors should look for and why it matters. The content should be approximately {} words when spoken at a natural pace.".format(query, ', '.join(interests), word_limit, word_limit + 20, word_limit)
        )
        self.printer.update_item(
            "Architecture",
            "Completed architecture research",
            is_done=True,
        )
        return result.final_output_as(Architecture)
    async def _get_culinary(self, query: str, interests: list, word_limit: int) -> Culinary:
        """Run the culinary specialist; target length is word_limit..word_limit+20."""
        self.printer.update_item("Culinary", "Discovering local flavors...")
        result = await Runner.run(
            culinary_agent,
            "Query: {} Interests: {} Word Limit: {} - {}\n\nInstructions: Create engaging culinary content for an audio tour. Focus on local specialties, food history, and interesting stories about restaurants and dishes. Make it conversational and include specific recommendations. Describe the flavors and cultural significance of the food. The content should be approximately {} words when spoken at a natural pace.".format(query, ', '.join(interests), word_limit, word_limit + 20, word_limit)
        )
        self.printer.update_item(
            "Culinary",
            "Completed culinary research",
            is_done=True,
        )
        return result.final_output_as(Culinary)
    async def _get_culture(self, query: str, interests: list, word_limit: int) -> Culture:
        """Run the culture specialist; target length is word_limit..word_limit+20."""
        self.printer.update_item("Culture", "Exploring cultural highlights...")
        result = await Runner.run(
            culture_agent,
            "Query: {} Interests: {} Word Limit: {} - {}\n\nInstructions: Create engaging cultural content for an audio tour. Focus on local traditions, arts, and community life. Make it conversational and include specific cultural venues and events. Describe the atmosphere and significance of cultural landmarks. The content should be approximately {} words when spoken at a natural pace.".format(query, ', '.join(interests), word_limit, word_limit + 20, word_limit)
        )
        self.printer.update_item(
            "Culture",
            "Completed culture research",
            is_done=True,
        )
        return result.final_output_as(Culture)
    async def _get_final_tour(self, query: str, interests: list, duration: float, research_results: dict) -> FinalTour:
        """Hand the specialists' sections to the orchestrator agent.

        research_results maps lowercase interest names ("architecture", ...)
        to specialist outputs that expose the narration on `.output`.
        """
        self.printer.update_item("Final Tour", "Creating your personalized tour...")
        # Build content sections based on selected interests
        content_sections = []
        for interest in interests:
            if interest.lower() in research_results:
                content_sections.append(research_results[interest.lower()].output)
        # Calculate total words based on duration
        # Assuming average speaking rate of 150 words per minute
        words_per_minute = 150
        total_words = int(duration) * words_per_minute
        # Create the prompt with proper string formatting
        prompt = (
            "Query: {}\n"
            "Selected Interests: {}\n"
            "Total Tour Duration (in minutes): {}\n"
            "Target Word Count: {}\n\n"
            "Content Sections:\n{}\n\n"
            "Instructions: Create a natural, conversational audio tour that focuses only on the selected interests. "
            "Make it feel like a friendly guide walking alongside the visitor, sharing interesting stories and insights. "
            "Use natural transitions between topics and maintain an engaging but relaxed pace. "
            "Include specific locations and landmarks where possible. "
            "Add natural pauses and transitions as if walking between locations. "
            "Use phrases like 'as we walk', 'look to your left', 'notice how', etc. "
            "Make it interactive and engaging, as if the guide is actually there with the visitor. "
            "Start with a warm welcome and end with a natural closing thought. "
            "The total content should be approximately {} words when spoken at a natural pace of 150 words per minute. "
            "This will ensure the tour lasts approximately {} minutes."
        ).format(
            query,
            ', '.join(interests),
            duration,
            total_words,
            '\n\n'.join(content_sections),
            total_words,
            duration
        )
        result = await Runner.run(
            orchestrator_agent,
            prompt
        )
        self.printer.update_item(
            "Final Tour",
            "Completed Final Tour Guide Creation",
            is_done=True,
        )
return result.final_output_as(FinalTour) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Z4nzu/hackingtool | https://github.com/Z4nzu/hackingtool/blob/7df27d8383095257e05c9dfd5af3ea696039d793/core.py | core.py | from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich import box
from rich.traceback import install
from rich.theme import Theme
import os
import sys
import webbrowser
from platform import system
from traceback import print_exc
from typing import Callable, List, Tuple
# Enable rich tracebacks
install()
# Shared console with the project's purple accent color, used by all tools.
_theme = Theme({"purple": "#7B61FF"})
console = Console(theme=_theme)
def clear_screen():
    """Clear the terminal (`cls` on Windows, `clear` elsewhere)."""
    command = "cls" if system() == "Windows" else "clear"
    os.system(command)
def validate_input(ip, val_range):
    """Parse *ip* as an int and return it if it is one of the allowed values.

    Args:
        ip: User-supplied value (typically a string from input()).
        val_range: Iterable of accepted integers, or None/empty, in which
            case nothing validates.

    Returns:
        The parsed int when it is in val_range, otherwise None.
    """
    allowed = val_range or []
    try:
        value = int(ip)
    except (TypeError, ValueError):
        # Narrowed from a bare `except Exception` — int() only raises these.
        return None
    return value if value in allowed else None
class HackingTool(object):
    """Base class for a single tool entry in the menu system.

    Subclasses describe a tool via the class attributes below; the shell
    commands in INSTALL_COMMANDS / UNINSTALL_COMMANDS / RUN_COMMANDS are
    executed verbatim with os.system. The interactive menu offers
    Install/Run plus any extra (label, callable) options.
    """
    # Subclasses override these to describe the tool.
    TITLE: str = ""
    DESCRIPTION: str = ""
    INSTALL_COMMANDS: List[str] = []
    INSTALLATION_DIR: str = ""
    UNINSTALL_COMMANDS: List[str] = []
    RUN_COMMANDS: List[str] = []
    OPTIONS: List[Tuple[str, Callable]] = []
    PROJECT_URL: str = ""
    def __init__(self, options=None, installable=True, runnable=True):
        """Build the per-instance OPTIONS list (Install/Run + extras)."""
        options = options or []
        if isinstance(options, list):
            self.OPTIONS = []
            if installable:
                self.OPTIONS.append(("Install", self.install))
            if runnable:
                self.OPTIONS.append(("Run", self.run))
            self.OPTIONS.extend(options)
        else:
            raise Exception("options must be a list of (option_name, option_fn) tuples")
    def show_info(self):
        """Render the tool's title/description (and project URL) panel."""
        desc = f"[cyan]{self.DESCRIPTION}[/cyan]"
        if self.PROJECT_URL:
            desc += f"\n[green]🔗 {self.PROJECT_URL}[/green]"
        console.print(Panel(desc, title=f"[bold purple]{self.TITLE}[/bold purple]", border_style="purple", box=box.DOUBLE))
    def show_options(self, parent=None):
        """Interactive menu loop; returns 99 when the user backs out.

        Options are numbered from 1; 98 opens the project page, 99 goes
        back (or exits at the top level).
        """
        clear_screen()
        self.show_info()
        table = Table(title="Options", box=box.SIMPLE_HEAVY)
        table.add_column("No.", style="bold cyan", justify="center")
        table.add_column("Action", style="bold yellow")
        for index, option in enumerate(self.OPTIONS):
            table.add_row(str(index + 1), option[0])
        if self.PROJECT_URL:
            table.add_row("98", "Open Project Page")
        table.add_row("99", f"Back to {parent.TITLE if parent else 'Exit'}")
        console.print(table)
        option_index = input("\n[?] Select an option: ").strip()
        try:
            option_index = int(option_index)
            if option_index - 1 in range(len(self.OPTIONS)):
                ret_code = self.OPTIONS[option_index - 1][1]()
                if ret_code != 99:
                    input("\nPress [Enter] to continue...")
            elif option_index == 98:
                self.show_project_page()
            elif option_index == 99:
                if parent is None:
                    sys.exit()
                return 99
        except (TypeError, ValueError):
            console.print("[red]⚠ Please enter a valid option.[/red]")
            input("\nPress [Enter] to continue...")
        except Exception:
            console.print_exception(show_locals=True)
            input("\nPress [Enter] to continue...")
        # NOTE(review): re-prompts via recursion; very long sessions could in
        # principle hit the recursion limit — confirm acceptable.
        return self.show_options(parent=parent)
    def before_install(self): pass  # hook: runs before the install commands
    def install(self):
        """Run INSTALL_COMMANDS through the shell, wrapped by the hooks."""
        self.before_install()
        if isinstance(self.INSTALL_COMMANDS, (list, tuple)):
            for INSTALL_COMMAND in self.INSTALL_COMMANDS:
                console.print(f"[yellow]→ {INSTALL_COMMAND}[/yellow]")
                os.system(INSTALL_COMMAND)
            self.after_install()
    def after_install(self):
        console.print("[green]✔ Successfully installed![/green]")
    def before_uninstall(self) -> bool:
        # Return False from a subclass to veto the uninstall.
        return True
    def uninstall(self):
        """Run UNINSTALL_COMMANDS through the shell if the hook allows it."""
        if self.before_uninstall():
            if isinstance(self.UNINSTALL_COMMANDS, (list, tuple)):
                for UNINSTALL_COMMAND in self.UNINSTALL_COMMANDS:
                    console.print(f"[red]→ {UNINSTALL_COMMAND}[/red]")
                    os.system(UNINSTALL_COMMAND)
            self.after_uninstall()
    def after_uninstall(self): pass  # hook: runs after the uninstall commands
    def before_run(self): pass  # hook: runs before the run commands
    def run(self):
        """Run RUN_COMMANDS through the shell, wrapped by the hooks."""
        self.before_run()
        if isinstance(self.RUN_COMMANDS, (list, tuple)):
            for RUN_COMMAND in self.RUN_COMMANDS:
                console.print(f"[cyan]⚙ Running:[/cyan] [bold]{RUN_COMMAND}[/bold]")
                os.system(RUN_COMMAND)
            self.after_run()
    def after_run(self): pass  # hook: runs after the run commands
    def is_installed(self, dir_to_check=None):
        # Explicitly unimplemented; kept only for interface compatibility.
        console.print("[yellow]⚠ Unimplemented: DO NOT USE[/yellow]")
        return "?"
    def show_project_page(self):
        """Open PROJECT_URL in the default web browser."""
        console.print(f"[blue]🌐 Opening project page: {self.PROJECT_URL}[/blue]")
        webbrowser.open_new_tab(self.PROJECT_URL)
class HackingToolsCollection(object):
    """A named category grouping several HackingTool (or nested collection)
    entries, rendered as an interactive selection menu."""
    # Subclasses override these.
    TITLE: str = ""
    DESCRIPTION: str = ""
    TOOLS: List = []
    def __init__(self):
        pass
    def show_info(self):
        """Render the category title rule and description."""
        console.rule(f"[bold purple]{self.TITLE}[/bold purple]", style="purple")
        console.print(f"[italic cyan]{self.DESCRIPTION}[/italic cyan]\n")
    def show_options(self, parent=None):
        """Interactive menu loop; returns 99 when the user backs out.

        NOTE(review): tools are numbered from 0 here, while HackingTool
        numbers its options from 1 — confirm the inconsistency is intended.
        """
        clear_screen()
        self.show_info()
        table = Table(title="Available Tools", box=box.MINIMAL_DOUBLE_HEAD)
        table.add_column("No.", justify="center", style="bold cyan")
        table.add_column("Tool Name", style="bold yellow")
        for index, tool in enumerate(self.TOOLS):
            table.add_row(str(index), tool.TITLE)
        table.add_row("99", f"Back to {parent.TITLE if parent else 'Exit'}")
        console.print(table)
        tool_index = input("\n[?] Choose a tool: ").strip()
        try:
            tool_index = int(tool_index)
            if tool_index in range(len(self.TOOLS)):
                ret_code = self.TOOLS[tool_index].show_options(parent=self)
                if ret_code != 99:
                    input("\nPress [Enter] to continue...")
            elif tool_index == 99:
                if parent is None:
                    sys.exit()
                return 99
        except (TypeError, ValueError):
            console.print("[red]⚠ Please enter a valid option.[/red]")
            input("\nPress [Enter] to continue...")
        except Exception:
            console.print_exception(show_locals=True)
            input("\nPress [Enter] to continue...")
        # Re-prompt until the user chooses 99.
        return self.show_options(parent=parent)
| python | MIT | 7df27d8383095257e05c9dfd5af3ea696039d793 | 2026-01-04T14:39:20.027553Z | false |
Z4nzu/hackingtool | https://github.com/Z4nzu/hackingtool/blob/7df27d8383095257e05c9dfd5af3ea696039d793/hackingtool.py | hackingtool.py | #!/usr/bin/env python3
# Version 1.1.0 (rich UI - purple theme)
import os
import sys
import webbrowser
from platform import system
from time import sleep
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.prompt import Prompt, IntPrompt, Confirm
from rich.align import Align
from rich.text import Text
from rich import box
from rich.columns import Columns
from rich.rule import Rule
from rich.padding import Padding
from core import HackingToolsCollection
from tools.anonsurf import AnonSurfTools
from tools.ddos import DDOSTools
from tools.exploit_frameworks import ExploitFrameworkTools
from tools.forensic_tools import ForensicTools
from tools.information_gathering_tools import InformationGatheringTools
from tools.other_tools import OtherTools
from tools.payload_creator import PayloadCreatorTools
from tools.phising_attack import PhishingAttackTools
from tools.post_exploitation import PostExploitationTools
from tools.remote_administration import RemoteAdministrationTools
from tools.reverse_engineering import ReverseEngineeringTools
from tools.sql_tools import SqlInjectionTools
from tools.steganography import SteganographyTools
from tools.tool_manager import ToolManager
from tools.webattack import WebAttackTools
from tools.wireless_attack_tools import WirelessAttackTools
from tools.wordlist_generator import WordlistGeneratorTools
from tools.xss_attack import XSSAttackTools
console = Console()
ASCII_LOGO = r"""
▄█ █▄ ▄████████ ▄████████ ▄█ ▄█▄ ▄█ ███▄▄▄▄ ▄██████▄ ███ ▄██████▄ ▄██████▄ ▄█
███ ███ ███ ███ ███ ███ ███ ▄███▀ ███ ███▀▀▀██▄ ███ ███ ▀█████████▄ ███ ███ ███ ███ ███
███ ███ ███ ███ ███ █▀ ███▐██▀ ███▌ ███ ███ ███ █▀ ▀███▀▀██ ███ ███ ███ ███ ███
▄███▄▄▄▄███▄▄ ███ ███ ███ ▄█████▀ ███▌ ███ ███ ▄███ ███ ▀ ███ ███ ███ ███ ███
▀▀███▀▀▀▀███▀ ▀███████████ ███ ▀▀█████▄ ███▌ ███ ███ ▀▀███ ████▄ ███ ███ ███ ███ ███ ███
███ ███ ███ ███ ███ █▄ ███▐██▄ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███
███ ███ ███ ███ ███ ███ ███ ▀███▄ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███▌ ▄
███ █▀ ███ █▀ ████████▀ ███ ▀█▀ █▀ ▀█ █▀ ████████▀ ▄████▀ ▀██████▀ ▀██████▀ █████▄▄██
▀ ▀
"""
# (label, icon) pairs shown in the main menu. The index order MUST stay in
# sync with the instantiated tool objects in ``all_tools`` below, because
# menu selection indexes directly into that list.
tool_definitions = [
    ("Anonymously Hiding Tools", "🛡️"),
    ("Information gathering tools", "🔍"),
    ("Wordlist Generator", "📚"),
    ("Wireless attack tools", "📶"),
    ("SQL Injection Tools", "🧩"),
    ("Phishing attack tools", "🎣"),
    ("Web Attack tools", "🌐"),
    ("Post exploitation tools", "🔧"),
    ("Forensic tools", "🕵️"),
    ("Payload creation tools", "📦"),
    ("Exploit framework", "🧰"),
    ("Reverse engineering tools", "🔁"),
    ("DDOS Attack Tools", "⚡"),
    ("Remote Administrator Tools (RAT)", "🖥️"),
    ("XSS Attack Tools", "💥"),
    ("Steganography tools", "🖼️"),  # typo fix: was "Steganograhy"
    ("Other tools", "✨"),
    ("Update or Uninstall | Hackingtool", "♻️"),
]
# One instantiated tool-collection object per menu entry. Positional order
# must match ``tool_definitions`` above — ``interact_menu`` uses the same
# index into both lists.
all_tools = [
    AnonSurfTools(),
    InformationGatheringTools(),
    WordlistGeneratorTools(),
    WirelessAttackTools(),
    SqlInjectionTools(),
    PhishingAttackTools(),
    WebAttackTools(),
    PostExploitationTools(),
    ForensicTools(),
    PayloadCreatorTools(),
    ExploitFrameworkTools(),
    ReverseEngineeringTools(),
    DDOSTools(),
    RemoteAdministrationTools(),
    XSSAttackTools(),
    SteganographyTools(),
    OtherTools(),
    ToolManager()
]
class AllTools(HackingToolsCollection):
    """Aggregate collection exposing every tool category in one place."""

    TITLE = "All tools"
    TOOLS = all_tools

    def show_info(self):
        """Render the startup banner: logo, project link, version, warning."""
        banner = Text()
        banner.append(ASCII_LOGO, style="bold magenta")
        banner.append("\n\n")
        link_line = Text.assemble(
            (" https://github.com/Z4nzu/hackingtool ", "bold bright_black"),
            (" | ",),
            ("Version 1.1.0", "bold green"),
        )
        disclaimer = Text(" Please Don't Use For illegal Activity ", style="bold red")
        body = banner + Text("\n") + link_line + Text("\n") + disclaimer
        console.print(
            Panel(
                Align.center(body),
                box=box.DOUBLE,
                padding=(1, 2),
                border_style="magenta",
            )
        )
def build_menu():
    """Print the main menu: header panel, numbered tool table, footer hint.

    Every tool is listed under its real index (0-17), so the number shown is
    exactly the number ``interact_menu`` accepts. Previously the last tool
    (ToolManager, index 17) was mislabeled "99" while ``interact_menu``
    treats 99 as exit — typing the displayed number exited instead of
    opening the tool. 99 now gets its own dedicated Exit row.
    """
    table = Table.grid(expand=True)
    table.add_column("idx", width=6, justify="right")
    table.add_column("name", justify="left")
    for idx, (title, icon) in enumerate(tool_definitions):
        label = f"[bold magenta]{idx}[/bold magenta]"
        name = f"[white]{icon}[/white] [magenta]{title}[/magenta]"
        table.add_row(label, name)
    # Dedicated exit entry so 99 no longer shadows the last tool's index.
    table.add_row("[bold magenta]99[/bold magenta]", "[bold magenta]🚪 Exit[/bold magenta]")
    top_panel = Panel(
        Align.center(Text("HackingTool — Main Menu", style="bold white on magenta"), vertical="middle"),
        style="magenta",
        padding=(0, 1),
        box=box.ROUNDED
    )
    menu_panel = Panel.fit(
        table,
        title="[bold magenta]Select a tool[/bold magenta]",
        border_style="bright_magenta",
        box=box.SQUARE
    )
    footer = Align.center(Text("Choose number and press Enter — 99 to exit", style="italic bright_black"))
    console.print(top_panel)
    console.print(menu_panel)
    console.print(Rule(style="bright_black"))
    console.print(footer)
    console.print("")
def choose_path():
    """Ensure ``~/hackingtoolpath.txt`` exists and return its path.

    On first run, asks whether to use a custom installation path (1) or the
    default ``/home/hackingtool/`` (2) and persists the answer to the file.
    Returns the path of the persisted settings file.
    """
    fpath = os.path.expanduser("~/hackingtoolpath.txt")
    if not os.path.exists(fpath):
        os.system("clear" if system() == "Linux" else "cls")
        build_menu()
        # Describe what 1 / 2 mean — the bare prompt gave no hint before.
        console.print(Panel(
            "Setup path for tool installations\n"
            "[1] Enter a custom path   [2] Use the default (/home/hackingtool/)",
            border_style="magenta"
        ))
        choice = Prompt.ask("[magenta]Set Path[/magenta]", choices=["1", "2"], default="2")
        if choice == "1":
            inpath = Prompt.ask("[magenta]Enter Path (with Directory Name)[/magenta]")
            # Expand ~ and trim whitespace so main()'s makedirs() never
            # creates a literal "~" directory.
            inpath = os.path.expanduser(inpath.strip())
            with open(fpath, "w") as f:
                f.write(inpath)
            console.print(f"[green]Successfully Set Path to:[/green] {inpath}")
        else:
            autopath = "/home/hackingtool/"
            with open(fpath, "w") as f:
                f.write(autopath)
            console.print(f"[green]Your Default Path Is:[/green] {autopath}")
        sleep(1)
    return fpath
def interact_menu():
    """Main interactive loop: render the menu and dispatch to chosen tools.

    99 exits; any index into ``all_tools`` opens that tool's own menu via
    its ``show_options`` method when present.
    """
    while True:
        try:
            build_menu()
            selection = IntPrompt.ask("[magenta]Choose a tool to proceed[/magenta]", default=0)
            if selection == 99:
                console.print(Panel("[bold white on magenta]Goodbye — Come Back Safely[/bold white on magenta]"))
                return
            if not (0 <= selection < len(all_tools)):
                console.print("[red]Invalid selection. Pick a number from the menu.[/red]")
                continue
            chosen = all_tools[selection]
            title, icon = tool_definitions[selection]
            console.print(Panel(f"[bold magenta]{icon} Selected:[/bold magenta] [white]{title}"))
            try:
                entry = getattr(chosen, "show_options", None)
                if callable(entry):
                    entry()
                else:
                    console.print(f"[yellow]Tool '{title}' has no interactive menu (show_options).[/yellow]")
            except Exception as exc:
                console.print(Panel(f"[red]Error while opening {title}[/red]\n{exc}", border_style="red"))
            if not Confirm.ask("[magenta]Return to main menu?[/magenta]", default=True):
                console.print(Panel("[bold white on magenta]Exiting...[/bold white on magenta]"))
                return
        except KeyboardInterrupt:
            console.print("\n[bold red]Interrupted by user — exiting[/bold red]")
            return
def main():
    """Entry point: resolve the install path (Linux), show banner, run menu.

    On Windows, points the user at guidance instead of running the tools.
    """
    try:
        if system() == "Linux":
            fpath = choose_path()
            with open(fpath) as f:
                archive = f.readline().strip()
            # Guard against an empty/corrupt path file: fall back to the
            # documented default instead of crashing in makedirs("").
            if not archive:
                archive = "/home/hackingtool/"
            os.makedirs(archive, exist_ok=True)
            os.chdir(archive)
            AllTools().show_info()
            interact_menu()
        elif system() == "Windows":
            console.print(Panel("[bold red]Please run this tool on a Debian/Linux system for best results[/bold red]"))
            if Confirm.ask("Open guidance link in your browser?", default=True):
                webbrowser.open_new_tab("https://tinyurl.com/y522modc")
            sleep(2)
        else:
            console.print("[yellow]Please Check Your System or Open New Issue ...[/yellow]")
    except KeyboardInterrupt:
        console.print("\n[bold red]Exiting ..!!![/bold red]")
        sleep(1)
# Script entry point.
if __name__ == "__main__":
    main()
| python | MIT | 7df27d8383095257e05c9dfd5af3ea696039d793 | 2026-01-04T14:39:20.027553Z | false |
Z4nzu/hackingtool | https://github.com/Z4nzu/hackingtool/blob/7df27d8383095257e05c9dfd5af3ea696039d793/install.py | install.py | #!/usr/bin/env python3
# install_hackingtool.py (rich-based installer UI)
import os
import sys
import shutil
import subprocess
from pathlib import Path
from rich.console import Console
from rich.panel import Panel
from rich.prompt import Prompt, Confirm, IntPrompt
from rich.table import Table
from rich.align import Align
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.text import Text
from rich import box
from random import choice
# Shared rich console for all installer output.
console = Console()
# Source repository and the fixed system-wide install locations.
REPO_URL = "https://github.com/Z4nzu/hackingtool.git"
INSTALL_DIR = Path("/usr/share/hackingtool")
# Wrapper script installed on PATH so `hackingtool` works from any shell.
BIN_PATH = Path("/usr/bin/hackingtool")
# Virtualenv directory name (relative to INSTALL_DIR) and requirements file.
VENV_DIR_NAME = "venv"
REQUIREMENTS = "requirements.txt"
def check_root():
    """Exit with a clear message unless the process runs as root (euid 0)."""
    if os.geteuid() == 0:
        return
    console.print(Panel("[red]This installer must be run as root. Use: sudo python3 install_hackingtool.py[/red]"))
    sys.exit(1)
def run_cmd(cmd, check=True, capture=False, env=None):
    """Run *cmd* through the shell and return the CompletedProcess.

    NOTE(review): shell=True is deliberate — installer commands rely on
    `&&` and redirection. Only trusted, installer-defined strings are
    ever passed in.
    """
    completed = subprocess.run(
        cmd,
        shell=True,
        check=check,
        capture_output=capture,
        text=True,
        env=env,
    )
    return completed
def colorful_logo():
    """Print the banner panel in a randomly picked accent colour."""
    palette = ["magenta", "bright_magenta", "cyan", "blue", "green", "yellow"]
    accent = choice(palette)
    art = r"""
 ▄█    █▄       ▄████████  ▄████████    ▄█   ▄█▄  ▄█  ███▄▄▄▄      ▄██████▄      ███      ▄██████▄   ▄██████▄   ▄█
███    ███     ███    ███ ███    ███   ███ ▄███▀ ███  ███▀▀▀██▄   ███    ███ ▀█████████▄ ███    ███ ███    ███ ███
███    ███     ███    ███ ███    █▀    ███▐██▀   ███▌ ███   ███   ███    █▀     ▀███▀▀██ ███    ███ ███    ███ ███
▄███▄▄▄▄███▄▄ ▄███▄▄▄▄██▀ ███         ▄█████▀    ███▌ ███   ███  ▄███           ███   ▀ ███    ███ ███    ███ ███
▀▀███▀▀▀▀███▀  ▀███████████ ███        ▀▀█████▄    ███▌ ███   ███ ▀▀███ ████▄     ███     ███    ███ ███    ███ ███
███    ███   ███    ███ ███    █▄    ███▐██▄   ███  ███   ███   ███    ███    ███     ███    ███ ███    ███ ███
███    ███   ███    ███ ███    ███   ███ ▀███▄ ███  ███   ███   ███    ███    ███     ███    ███ ███    ███ ███▌    ▄
███    █▀    ███    █▀  ████████▀    ███   ▀█▀ █▀    ▀█   █▀    ████████▀    ▄████▀    ▀██████▀   ▀██████▀  █████▄▄██
                                      ▀                                                                      ▀
"""
    console.print(Panel(Text(art, style=accent), box=box.DOUBLE, border_style=accent))
    console.print(f"[bold {accent}]https://github.com/Z4nzu/hackingtool[/bold {accent}]\n")
def choose_distro():
    """Ask which package manager to target; returns 1 (apt), 2 (pacman) or 0 (exit)."""
    menu_text = (
        "[bold magenta]Select installation target[/bold magenta]"
        "\n\n[1] Kali / Parrot (apt)\n[2] Arch (pacman)\n[0] Exit"
    )
    console.print(Panel(menu_text, border_style="bright_magenta"))
    # Local name avoids shadowing random.choice imported at module level.
    return IntPrompt.ask("Choice", choices=["0", "1", "2"], default=1)
def check_internet():
    """Return True when at least one probe URL is reachable via curl.

    Probes two independent hosts so a single blocked domain does not fail
    the whole install (previously the same logic was duplicated in nested
    try/except blocks).
    """
    console.print("[yellow]* Checking internet connectivity...[/yellow]")
    for url in ("https://www.google.com", "https://github.com"):
        try:
            run_cmd(f"curl -sSf --max-time 10 {url} > /dev/null", check=True)
            console.print("[green][✔] Internet connection OK[/green]")
            return True
        except Exception:
            continue
    console.print("[red][✘] Internet connection not available[/red]")
    return False
def system_update_and_install(choice):
    """Run the distro-appropriate system update and base-package install.

    Args:
        choice: 1 for apt-based distros (Kali/Parrot), 2 for pacman (Arch).

    All steps are best-effort: failures are reported but never abort the
    installer.
    """
    def _attempt(announce, cmd, fail_text):
        # Report failures and keep going — package issues are non-fatal here.
        console.print(announce)
        try:
            run_cmd(cmd)
        except subprocess.CalledProcessError as e:
            console.print(f"[red][!][/red] {fail_text} Error: {e}")

    if choice == 1:
        _attempt(
            "[yellow]* Running apt update/upgrade...[/yellow]",
            "apt update -y && apt upgrade -y",
            "apt update/upgrade failed (non-fatal). Continuing installation.",
        )
        _attempt(
            "[yellow]* Installing required packages (apt)...[/yellow]",
            "apt-get install -y git python3-pip python3-venv figlet boxes php curl xdotool wget",
            "apt-get install failed (non-fatal). You may need to install some packages manually.",
        )
    elif choice == 2:
        _attempt(
            "[yellow]* Running pacman update...[/yellow]",
            "pacman -Syu --noconfirm",
            "pacman update failed (non-fatal). Continuing installation.",
        )
        _attempt(
            "[yellow]* Installing required packages (pacman)...[/yellow]",
            "pacman -S --noconfirm git python-pip",
            "pacman install failed (non-fatal). You may need to install some packages manually.",
        )
    else:
        console.print("[red]Invalid package manager choice[/red]")
def prepare_install_dir():
    """Create a fresh INSTALL_DIR, optionally replacing an existing one.

    Aborts the installer if the directory exists and the user declines to
    replace it.
    """
    if INSTALL_DIR.exists():
        console.print(f"[red]The directory {INSTALL_DIR} already exists.[/red]")
        if Confirm.ask("Replace it? This will remove the existing directory", default=False):
            # shutil.rmtree instead of shelling out to `rm -rf`: no shell
            # involved, and failures surface as Python exceptions.
            shutil.rmtree(INSTALL_DIR)
        else:
            console.print("[red]Installation aborted by user.[/red]")
            sys.exit(1)
    INSTALL_DIR.mkdir(parents=True, exist_ok=True)
def git_clone():
    """Clone the hackingtool repository into INSTALL_DIR.

    Returns:
        True on a successful clone, False otherwise.
    """
    console.print("[yellow]* Cloning hackingtool repository...[/yellow]")
    try:
        run_cmd(f"git clone {REPO_URL} {str(INSTALL_DIR)}")
    except Exception as err:
        console.print(f"[red][✘] Failed to clone repository: {err}[/red]")
        return False
    console.print("[green][✔] Repository cloned[/green]")
    return True
def create_venv_and_install(choice):
    """Create the project virtualenv, install requirements, ensure figlet.

    Args:
        choice: 1 for apt-based distros, 2 for pacman/Arch.
    """
    venv_path = INSTALL_DIR / VENV_DIR_NAME
    console.print("[yellow]* Creating virtual environment...[/yellow]")
    run_cmd(f"python3 -m venv {str(venv_path)}")
    # Use the venv's own pip so requirements land inside the virtualenv.
    # (Removed an unused `activate` local that was never referenced.)
    pip = str(venv_path / "bin" / "pip")
    if (INSTALL_DIR / REQUIREMENTS).exists():
        console.print("[yellow]* Installing Python requirements...[/yellow]")
        run_cmd(f"{pip} install -r {str(INSTALL_DIR / REQUIREMENTS)}")
    else:
        console.print("[yellow]requirements.txt not found, skipping pip install.[/yellow]")
    if choice == 1:
        run_cmd("apt install figlet -y")
    elif choice == 2:
        # try pacman and fall back to AUR guidance
        try:
            run_cmd("pacman -S --noconfirm figlet")
        except Exception:
            console.print("[yellow]figlet not available in pacman automatically. Consider installing from AUR.[/yellow]")
def create_launcher():
    """Write a wrapper script that activates the venv and launches the tool,
    then install it at /usr/bin/hackingtool."""
    console.print("[yellow]* Creating launcher script...[/yellow]")
    launcher_path = INSTALL_DIR / "hackingtool.sh"
    script_lines = [
        "#!/bin/bash\n",
        f"source {str(INSTALL_DIR / VENV_DIR_NAME)}/bin/activate\n",
        f"python3 {str(INSTALL_DIR / 'hackingtool.py')} \"$@\"\n",
    ]
    with open(launcher_path, "w") as fh:
        fh.writelines(script_lines)
    os.chmod(launcher_path, 0o755)
    # Replace any previous launcher on PATH, then move ours into place.
    if BIN_PATH.exists():
        BIN_PATH.unlink()
    shutil.move(str(launcher_path), str(BIN_PATH))
    console.print(f"[green][✔] Launcher installed at {str(BIN_PATH)}[/green]")
def final_messages():
    """Show the closing success panel with the launch command."""
    console.print(
        Panel(
            "[bold magenta]Installation complete[/bold magenta]\n\nType [bold cyan]hackingtool[/bold cyan] in terminal to start.",
            border_style="magenta",
        )
    )
def main():
    """Drive the full install: root check, distro selection, connectivity,
    system packages, clone, virtualenv, launcher."""
    check_root()
    console.clear()
    colorful_logo()
    distro = choose_distro()
    if distro == 0:
        console.print("[red]Exiting...[/red]")
        sys.exit(0)
    if not check_internet():
        sys.exit(1)
    # Spinner while system packages are updated/installed.
    with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
        progress.add_task(description="Preparing system...", total=None)
        system_update_and_install(distro)
    prepare_install_dir()
    if not git_clone():
        sys.exit(1)
    # Spinner while the virtualenv and requirements are set up.
    with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
        progress.add_task(description="Setting up virtualenv & requirements...", total=None)
        create_venv_and_install(distro)
    create_launcher()
    final_messages()
# Entry point: translate user interrupts and failed shell commands into
# clean non-zero exits instead of tracebacks.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        console.print("\n[red]Installation interrupted by user[/red]")
        sys.exit(1)
    except subprocess.CalledProcessError as e:
        console.print(f"[red]Command failed: {e}[/red]")
        sys.exit(1)
| python | MIT | 7df27d8383095257e05c9dfd5af3ea696039d793 | 2026-01-04T14:39:20.027553Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.