sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/relevance_agent.py | """
Relevance Agent — Scores signals by developer relevance (0–100).
This agent uses LLM reasoning to evaluate each signal's importance to
AI/ML developers. It's a legitimate agent because relevance scoring
requires judgment, context understanding, and nuanced assessment that
pure heuristics cannot capture.
Model Selection:
Uses a fast, cost-efficient model (gpt-4.1-mini by default) because
relevance scoring is high-volume and doesn't require deep reasoning —
it's a classification task, not a synthesis task.
"""
import json
from typing import Dict, Any, List
from agno.agent import Agent
from agno.models.openai import OpenAIChat
# Central model config — override via MODEL_RELEVANCE env var
import os
DEFAULT_MODEL = os.environ.get("MODEL_RELEVANCE", "gpt-4.1-mini")
class RelevanceAgent:
    """
    Agent that scores signals based on relevance to developers.

    Why this IS an agent (unlike SignalCollector):
        Relevance scoring requires understanding context, assessing novelty,
        and making judgment calls about what matters to developers. This is
        inherently a reasoning task that benefits from LLM capabilities.

    Responsibilities:
        - Score signals 0-100 based on developer relevance
        - Provide reasoning for each score
        - Gracefully fall back to heuristics when LLM unavailable
    """

    def __init__(self, model_id: str = None):
        """
        Initialize the Relevance Agent.

        Args:
            model_id: OpenAI model to use. Defaults to gpt-4.1-mini (fast, cheap).
        """
        self.model_id = model_id or DEFAULT_MODEL
        self.agent = Agent(
            name="Relevance Scorer",
            model=OpenAIChat(id=self.model_id),
            role="Scores technical signals based on developer relevance",
            instructions=[
                "Score each signal from 0-100 based on relevance.",
                "Consider: novelty, impact, actionability, and timeliness.",
                "Prioritize signals relevant to AI/ML engineers.",
                "Provide brief reasoning for each score.",
            ],
            markdown=True,
        )

    def score(self, signal: Dict[str, Any]) -> Dict[str, Any]:
        """
        Score a single signal for developer relevance.

        Attempts LLM scoring first, falls back to heuristics on failure.

        Returns:
            Dict with "score" (int/float) and "reasoning" (str) keys.
        """
        prompt = f"""Rate the relevance of this signal for AI/ML developers.
Score from 0-100 where:
- 0-30: Low relevance (noise, off-topic)
- 31-60: Moderate relevance (interesting but not urgent)
- 61-80: High relevance (important for developers to know)
- 81-100: Critical relevance (must-know, actionable)
Signal:
- Source: {signal.get('source', 'unknown')}
- Title: {signal.get('title', 'Untitled')}
- Description: {signal.get('description', '')[:500]}
Respond with ONLY a JSON object:
{{"score": <number>, "reasoning": "<one sentence>"}}"""
        try:
            response = self.agent.run(prompt, stream=False)
            return self._parse_response(response.content, signal)
        except Exception as e:
            # Any agent/transport failure degrades to the heuristic path.
            return self._fallback_score(signal, str(e))

    def score_batch(self, signals: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Score multiple signals, returning each with a 'relevance' key."""
        scored = []
        for signal in signals:
            result = self.score(signal)
            scored.append({**signal, "relevance": result})
        return scored

    def _parse_response(self, content: str, signal: Dict) -> Dict[str, Any]:
        """Parse LLM JSON response into structured output."""
        try:
            text = content.strip()
            if "```" in text:
                # Take the first fenced block's payload.
                text = text.split("```")[1].strip()
                # Strip only a LEADING language tag. The previous
                # `.replace("json", "")` removed every occurrence of the word
                # "json", corrupting payloads whose reasoning mentioned it.
                if text.startswith("json"):
                    text = text[len("json"):].strip()
            parsed = json.loads(text)
            if "score" not in parsed:
                # Well-formed JSON but wrong shape — treat as a parse failure.
                return self._fallback_score(signal, "Missing 'score' field")
            return parsed
        except (json.JSONDecodeError, IndexError):
            return self._fallback_score(signal, "Parse error")

    def _fallback_score(self, signal: Dict, error: str) -> Dict[str, Any]:
        """
        Heuristic fallback when LLM is unavailable.

        Uses metadata signals (stars, points) as rough relevance proxies.
        This is intentionally simple — the LLM path is the real logic.
        """
        score = 50  # Default: moderate
        metadata = signal.get("metadata", {})
        if metadata.get("stars", 0) > 100:
            score += 20
        if metadata.get("points", 0) > 50:
            score += 15
        return {
            "score": min(score, 100),
            "reasoning": f"Heuristic score (LLM unavailable: {error})",
        }
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/relevance_agent.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/risk_agent.py | """
Risk Agent — Assesses security risks and breaking changes.
This agent uses LLM reasoning to analyze signals for potential risks
including security vulnerabilities, breaking API changes, and deprecation
notices. It's a legitimate agent because risk assessment requires
contextual understanding of technical implications.
Model Selection:
Uses a structured-reasoning model (gpt-4.1-mini by default) because
risk assessment benefits from careful, step-by-step analysis. For
production workloads with high-stakes decisions, consider upgrading
to gpt-4.1 or o4-mini via the MODEL_RISK environment variable.
"""
import json
from typing import Dict, Any, List
from agno.agent import Agent
from agno.models.openai import OpenAIChat
# Central model config — override via MODEL_RISK env var
import os
DEFAULT_MODEL = os.environ.get("MODEL_RISK", "gpt-4.1-mini")
class RiskAgent:
    """
    Agent that assesses risk levels in technical signals.

    Why this IS an agent:
        Risk assessment requires reasoning about technical implications,
        understanding security contexts, and making judgment calls about
        severity. This is inherently a reasoning task.

    Responsibilities:
        - Identify security vulnerabilities
        - Flag breaking changes
        - Detect deprecation notices
        - Rate overall risk level (LOW / MEDIUM / HIGH / CRITICAL)
    """

    # Ordered from least to most severe.
    RISK_LEVELS = ["LOW", "MEDIUM", "HIGH", "CRITICAL"]

    def __init__(self, model_id: str = None):
        """
        Initialize the Risk Agent.

        Args:
            model_id: OpenAI model to use. Defaults to gpt-4.1-mini.
        """
        self.model_id = model_id or DEFAULT_MODEL
        self.agent = Agent(
            name="Risk Assessor",
            model=OpenAIChat(id=self.model_id),
            role="Assesses security and breaking change risks in technical signals",
            instructions=[
                "Analyze signals for security vulnerabilities.",
                "Identify breaking changes that may affect developers.",
                "Flag deprecation notices and migration requirements.",
                "Rate risk level: LOW, MEDIUM, HIGH, or CRITICAL.",
            ],
            markdown=True,
        )

    def assess(self, signal: Dict[str, Any]) -> Dict[str, Any]:
        """
        Assess risk level of a signal.

        Attempts LLM assessment first, falls back to keyword heuristics.

        Returns:
            Dict with "risk_level", "concerns" (list) and "breaking_changes" keys.
        """
        prompt = f"""Analyze this technical signal for risks:
Signal:
- Source: {signal.get('source', 'unknown')}
- Title: {signal.get('title', 'Untitled')}
- Description: {signal.get('description', '')[:500]}
Assess for:
1. Security vulnerabilities
2. Breaking changes
3. Deprecations
Respond with ONLY a JSON object:
{{"risk_level": "LOW|MEDIUM|HIGH|CRITICAL", "concerns": ["<list of concerns>"], "breaking_changes": true|false}}"""
        try:
            response = self.agent.run(prompt, stream=False)
            return self._parse_response(response.content, signal)
        except Exception as e:
            # Any agent/transport failure degrades to the heuristic path.
            return self._fallback_assessment(signal, str(e))

    def assess_batch(self, signals: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Assess multiple signals, returning each with a 'risk' key."""
        assessed = []
        for signal in signals:
            result = self.assess(signal)
            assessed.append({**signal, "risk": result})
        return assessed

    def _parse_response(self, content: str, signal: Dict) -> Dict[str, Any]:
        """Parse LLM JSON response into structured output."""
        try:
            text = content.strip()
            if "```" in text:
                # Take the first fenced block's payload.
                text = text.split("```")[1].strip()
                # Strip only a LEADING language tag. The previous
                # `.replace("json", "")` removed every occurrence of the word
                # "json", corrupting payloads that mentioned it in a concern.
                if text.startswith("json"):
                    text = text[len("json"):].strip()
            parsed = json.loads(text)
            if "risk_level" not in parsed:
                # Well-formed JSON but wrong shape — treat as a parse failure.
                return self._fallback_assessment(signal, "Missing 'risk_level' field")
            return parsed
        except (json.JSONDecodeError, IndexError):
            return self._fallback_assessment(signal, "Parse error")

    def _fallback_assessment(self, signal: Dict, error: str) -> Dict[str, Any]:
        """
        Keyword-based fallback when LLM is unavailable.

        Simple heuristic: scan title for risk-indicating keywords.
        This is intentionally conservative — better to over-flag than miss.
        """
        title = signal.get("title", "").lower()
        risk_level = "LOW"
        concerns = []
        # HIGH is checked first so a title matching both buckets escalates.
        risk_keywords = {
            "HIGH": ["vulnerability", "exploit", "cve", "critical", "breach"],
            "MEDIUM": ["breaking", "deprecated", "removed", "migration"],
        }
        for level, keywords in risk_keywords.items():
            if any(kw in title for kw in keywords):
                risk_level = level
                concerns.append(f"Keyword match: {level}")
                break
        return {
            "risk_level": risk_level,
            "concerns": concerns or [f"Heuristic (LLM unavailable: {error})"],
            "breaking_changes": "breaking" in title,
        }
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/risk_agent.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/signal_collector.py | """
Signal Collector — Pure utility (not an agent).
Signal collection is deterministic and intentionally not agent-driven.
This module aggregates signals from adapters, normalizes them to a unified
schema, and deduplicates deterministically. No LLM reasoning is involved
because collection/normalization is a mechanical transformation — using an
agent here would be decorative, not functional.
Design Decision:
Agents are used only where reasoning is required. Signal collection
involves no ambiguity, judgment, or language understanding — it's a
pipeline transformation. Wrapping it in an Agent class would mislead
readers into thinking an LLM call is necessary here.
"""
from typing import List, Dict, Any
from datetime import datetime, timezone
class SignalCollector:
    """
    Utility that collects and normalizes signals from multiple sources.

    NOT an agent — no LLM calls. This is an intentional design choice.
    See module docstring for rationale.

    Responsibilities:
        - Normalize signals to unified schema
        - Deduplicate deterministically (source:id composite key)
        - Filter incomplete signals
    """

    def collect(self, signals: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Normalize and deduplicate raw signals from adapters.

        Deduplication only applies to signals that carry an external id.
        Previously every id-less signal from the same source shared the
        composite key "source:" and all but the first were silently dropped.

        Args:
            signals: Raw signals from adapters (heterogeneous schemas).

        Returns:
            List of normalized, deduplicated signal dictionaries.
        """
        normalized = []
        seen_ids = set()
        for signal in signals:
            # Deterministic dedup key: source + external id. Skipped when the
            # signal has no usable id (None/empty) — distinct id-less items
            # must all survive.
            external_id = signal.get("id") or ""
            if external_id:
                signal_id = f"{signal.get('source', 'unknown')}:{external_id}"
                if signal_id in seen_ids:
                    continue
                seen_ids.add(signal_id)
            # Normalize to unified schema
            normalized.append({
                "id": signal.get("id", ""),
                "source": signal.get("source", "unknown"),
                "title": signal.get("title", "Untitled"),
                "description": signal.get("description", ""),
                "url": signal.get("url", ""),
                "metadata": signal.get("metadata", {}),
                "collected_at": datetime.now(timezone.utc).isoformat(),
            })
        return normalized

    def summarize_collection(self, signals: List[Dict[str, Any]]) -> str:
        """
        Generate a human-readable collection summary.
        Pure string formatting — no LLM needed.
        """
        sources: Dict[str, int] = {}
        for s in signals:
            src = s.get("source", "unknown")
            sources[src] = sources.get(src, 0) + 1
        parts = [f"{count} from {src}" for src, count in sources.items()]
        return f"Collected {len(signals)} signals: {', '.join(parts)}"
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/signal_collector.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/synthesis_agent.py | """
Synthesis Agent — Produces final intelligence digest.
This agent combines outputs from all previous stages to create a
comprehensive, actionable intelligence summary. It's the most reasoning-
intensive agent in the pipeline and uses the strongest available model.
Model Selection:
Uses gpt-4.1 by default — the strongest reasoning model — because
synthesis requires cross-referencing multiple signals, identifying
patterns, generating executive summaries, and producing actionable
recommendations. This is the one stage where model quality directly
impacts output quality.
Override via MODEL_SYNTHESIS env var for cost optimization.
"""
from typing import Dict, Any, List
from datetime import datetime, timezone
from agno.agent import Agent
from agno.models.openai import OpenAIChat
# Central model config — override via MODEL_SYNTHESIS env var
import os
DEFAULT_MODEL = os.environ.get("MODEL_SYNTHESIS", "gpt-4.1")
class SynthesisAgent:
    """
    Agent that turns scored and risk-assessed signals into the final digest.

    Uses the strongest default model (gpt-4.1) because synthesis must
    cross-reference relevance scores, risk assessments, and source metadata
    to produce coherent, actionable intelligence — the one stage where model
    quality directly shapes output quality.

    Responsibilities:
        - Combine relevance and risk assessments
        - Prioritize signals by importance
        - Generate executive summary
        - Produce actionable recommendations
    """

    def __init__(self, model_id: str = None):
        """
        Initialize the Synthesis Agent.

        Args:
            model_id: OpenAI model to use. Defaults to gpt-4.1 (strongest reasoning).
        """
        self.model_id = model_id or DEFAULT_MODEL
        self.agent = Agent(
            name="Intelligence Synthesizer",
            model=OpenAIChat(id=self.model_id),
            role="Synthesizes technical signals into actionable intelligence digests",
            instructions=[
                "Combine relevance scores and risk assessments.",
                "Prioritize by: high relevance + critical risks first.",
                "Generate an executive summary.",
                "Provide actionable recommendations for developers.",
            ],
            markdown=True,
        )

    def synthesize(self, signals: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Synthesize signals into a final intelligence digest.

        Prioritization, grouping, summary and recommendations are all
        deterministic transforms over the already-annotated signals.
        """
        ranked = self._prioritize_signals(signals)
        return {
            "generated_at": datetime.now(timezone.utc).isoformat(),
            "total_signals": len(signals),
            "executive_summary": self._generate_summary(ranked),
            "priority_signals": ranked[:5],
            "signals_by_source": self._group_by_source(ranked),
            "recommendations": self._generate_recommendations(ranked),
        }

    def _prioritize_signals(self, signals: List[Dict]) -> List[Dict]:
        """Order signals by relevance weighted by risk severity, highest first."""
        weights = {"CRITICAL": 2.0, "HIGH": 1.5, "MEDIUM": 1.0, "LOW": 0.8}

        def composite(item: Dict) -> float:
            base = item.get("relevance", {}).get("score", 50)
            level = item.get("risk", {}).get("risk_level", "LOW")
            return base * weights.get(level, 1.0)

        return sorted(signals, key=composite, reverse=True)

    def _group_by_source(self, signals: List[Dict]) -> Dict[str, List]:
        """Bucket signals by their source for categorized display."""
        buckets: Dict[str, List] = {}
        for item in signals:
            origin = item.get("source", "unknown")
            if origin not in buckets:
                buckets[origin] = []
            buckets[origin].append(item)
        return buckets

    def _generate_summary(self, signals: List[Dict]) -> str:
        """Build the executive-summary sentence from processed signals."""
        if not signals:
            return "No signals to summarize."
        relevant_count = sum(
            1 for s in signals if s.get("relevance", {}).get("score", 0) >= 70
        )
        risky_count = sum(
            1
            for s in signals
            if s.get("risk", {}).get("risk_level") in ["HIGH", "CRITICAL"]
        )
        sentences = [f"Analyzed {len(signals)} signals."]
        if relevant_count:
            sentences.append(f"{relevant_count} high-relevance items detected.")
        if risky_count:
            sentences.append(f"⚠️ {risky_count} signals with elevated risk.")
        # signals is non-empty here, so the top item always exists.
        sentences.append(f"Top signal: {signals[0].get('title', 'Unknown')}")
        return " ".join(sentences)

    def _generate_recommendations(self, signals: List[Dict]) -> List[str]:
        """Derive action items from the analysis; always returns a non-empty list."""
        actions: List[str] = []
        critical_count = sum(
            1 for s in signals if s.get("risk", {}).get("risk_level") == "CRITICAL"
        )
        if critical_count:
            actions.append(
                f"🚨 Review {critical_count} critical-risk signals immediately"
            )
        top_count = sum(
            1 for s in signals if s.get("relevance", {}).get("score", 0) >= 80
        )
        if top_count:
            actions.append(f"📌 Prioritize {top_count} high-relevance items")
        github_count = sum(1 for s in signals if s.get("source") == "github")
        if github_count:
            actions.append(f"⭐ Explore {github_count} trending repositories")
        return actions or ["✅ No urgent actions required"]
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/synthesis_agent.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/main.py | """
DevPulseAI — Multi-Agent Signal Intelligence Pipeline
Demonstrates a production-style multi-agent workflow that aggregates
technical signals from multiple sources, scores them for relevance,
assesses risks, and synthesizes an actionable intelligence digest.
Architecture:
Adapters (fetch) → SignalCollector (normalize) → RelevanceAgent (score)
→ RiskAgent (assess) → SynthesisAgent (digest)
Design Decisions:
- Signal collection is a utility, not an agent (deterministic work).
- Agents are used only where reasoning is required.
- Single provider (OpenAI) by default to reduce onboarding friction.
- Models are chosen by role: fast for classification, strong for synthesis.
Usage:
export OPENAI_API_KEY=sk-...
python main.py
Without API key: agents fall back to heuristic scoring.
"""
import os
from typing import List, Dict, Any, Optional
# Reduced default signal count for faster demo execution
DEFAULT_SIGNAL_LIMIT = 5
# Import adapters
from adapters.github import fetch_github_trending
from adapters.arxiv import fetch_arxiv_papers
from adapters.hackernews import fetch_hackernews_stories
from adapters.medium import fetch_medium_blogs
from adapters.huggingface import fetch_huggingface_models
# Import pipeline components
from agents import (
SignalCollector, # Utility — no LLM
RelevanceAgent, # Agent — gpt-4.1-mini
RiskAgent, # Agent — gpt-4.1-mini
SynthesisAgent, # Agent — gpt-4.1
)
def collect_signals(limit: Optional[int] = None) -> List[Dict[str, Any]]:
    """
    Collect raw signals from every configured adapter.

    Pure data aggregation — no LLM involved. Medium is capped at 3 items
    regardless of the requested limit.

    Args:
        limit: Per-source fetch limit; defaults to DEFAULT_SIGNAL_LIMIT.

    Returns:
        Flat list of raw signal dicts from all adapters.
    """
    fetch_limit = DEFAULT_SIGNAL_LIMIT if limit is None else limit
    print(f"\n📡 [1/4] Collecting Signals (limit: {fetch_limit} per source)...")
    signals: List[Dict[str, Any]] = []
    # (display label, fetch callable) — executed in order, output concatenated.
    fetch_plan = [
        ("GitHub trending repos", lambda: fetch_github_trending(limit=fetch_limit)),
        ("ArXiv papers", lambda: fetch_arxiv_papers(limit=fetch_limit)),
        ("HackerNews stories", lambda: fetch_hackernews_stories(limit=fetch_limit)),
        ("Medium blogs", lambda: fetch_medium_blogs(limit=min(fetch_limit, 3))),
        ("HuggingFace models", lambda: fetch_huggingface_models(limit=fetch_limit)),
    ]
    for label, fetch in fetch_plan:
        print(f" → Fetching {label}...")
        signals.extend(fetch())
    print(f" ✓ Collected {len(signals)} raw signals")
    return signals
def run_pipeline():
    """
    Execute the full signal intelligence pipeline.

    Pipeline stages:
        1. Signal Collection — Aggregate from sources (utility, no LLM)
        2. Normalization — Deduplicate and normalize (utility, no LLM)
        3. Relevance Score — Rate signals 0-100 (agent, gpt-4.1-mini)
        4. Risk Assessment — Identify risks (agent, gpt-4.1-mini)
        5. Synthesis — Produce digest (agent, gpt-4.1)

    Returns:
        The digest dict produced by SynthesisAgent.synthesize().
    """
    # NOTE(review): the printed banners count "[n/4]" while the docstring
    # lists 5 stages — synthesis prints without a stage tag. Confirm intent.
    print("=" * 60)
    print("🧠 DevPulseAI — Signal Intelligence Pipeline")
    print("=" * 60)
    # Check for API key — agents degrade to heuristics without it rather
    # than failing, so this is a warning, not an abort.
    if not os.environ.get("OPENAI_API_KEY"):
        print("\n⚠️ Warning: OPENAI_API_KEY not set.")
        print(" Agents will use fallback heuristics.\n")
    # Stage 1: Collect raw signals from adapters
    raw_signals = collect_signals()
    # Stage 2: Normalize and deduplicate (utility — no LLM)
    collector = SignalCollector()
    print("\n🔄 [2/4] Normalizing Signals...")
    normalized = collector.collect(raw_signals)
    print(f" ✓ {collector.summarize_collection(normalized)}")
    # Stage 3: Score for relevance (agent — gpt-4.1-mini).
    # Each scored signal gains a "relevance" dict with a 0-100 "score".
    relevance = RelevanceAgent()
    print("\n📊 [3/4] Scoring Relevance...")
    scored = relevance.score_batch(normalized)
    high_relevance = sum(
        1 for s in scored if s.get("relevance", {}).get("score", 0) >= 70
    )
    print(f" ✓ {high_relevance}/{len(scored)} signals rated high-relevance")
    # Stage 4: Assess risks (agent — gpt-4.1-mini).
    # Each assessed signal gains a "risk" dict with a "risk_level" string.
    risk = RiskAgent()
    print("\n⚠️ [4/4] Assessing Risks...")
    assessed = risk.assess_batch(scored)
    critical = sum(
        1
        for s in assessed
        if s.get("risk", {}).get("risk_level") in ["HIGH", "CRITICAL"]
    )
    print(f" ✓ {critical}/{len(assessed)} signals with elevated risk")
    # Stage 5: Synthesize digest (agent — gpt-4.1)
    synthesis = SynthesisAgent()
    print("\n📋 Generating Intelligence Digest...")
    digest = synthesis.synthesize(assessed)
    # Output results — render the digest to stdout for the CLI demo.
    print("\n" + "=" * 60)
    print("📄 INTELLIGENCE DIGEST")
    print("=" * 60)
    print(f"\n🕐 Generated: {digest['generated_at']}")
    print(f"📦 Total Signals: {digest['total_signals']}")
    print(f"\n📝 Summary: {digest['executive_summary']}")
    print("\n🎯 Top Priority Signals:")
    # Show at most the top three of the (up to five) priority signals.
    for i, signal in enumerate(digest.get("priority_signals", [])[:3], 1):
        score = signal.get("relevance", {}).get("score", "?")
        risk_level = signal.get("risk", {}).get("risk_level", "?")
        print(f" {i}. [{signal['source']}] {signal['title'][:50]}...")
        print(f" Relevance: {score} | Risk: {risk_level}")
    print("\n💡 Recommendations:")
    for rec in digest.get("recommendations", []):
        print(f" • {rec}")
    print("\n" + "=" * 60)
    print("✅ Pipeline completed successfully!")
    print("=" * 60)
    return digest


if __name__ == "__main__":
    run_pipeline()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/main.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/streamlit_app.py | import streamlit as st
import os
from typing import List, Dict, Any
# Import pipeline components from main.py and agents
from main import collect_signals, DEFAULT_SIGNAL_LIMIT
from agents import (
SignalCollectorAgent,
RelevanceAgent,
RiskAgent,
SynthesisAgent
)
# Page Config
st.set_page_config(
page_title="DevPulseAI – Signal Intelligence Demo",
page_icon="🧠",
layout="wide"
)
# Custom CSS for glassmorphism and premium feel
st.markdown("""
<style>
.main {
background: #0f172a;
color: #f1f5f9;
font-family: 'Inter', sans-serif;
}
.stApp {
background: linear-gradient(135deg, #0f172a 0%, #1e293b 100%);
}
.signal-card {
background: rgba(30, 41, 59, 0.7);
backdrop-filter: blur(10px);
border-radius: 12px;
padding: 20px;
margin-bottom: 20px;
border: 1px solid rgba(255, 255, 255, 0.1);
transition: transform 0.2s ease;
}
.signal-card:hover {
transform: translateY(-5px);
border-color: #3b82f6;
}
.badge {
padding: 4px 12px;
border-radius: 20px;
font-size: 0.8rem;
font-weight: 600;
text-transform: uppercase;
}
.risk-low { background: #059669; color: white; }
.risk-medium { background: #d97706; color: white; }
.risk-high { background: #dc2626; color: white; }
.risk-critical { background: #7f1d1d; color: white; }
.relevance-score {
font-size: 1.5rem;
font-weight: 700;
color: #3b82f6;
}
</style>
""", unsafe_allow_html=True)
# Title and Description
st.title("🧠 DevPulseAI – Signal Intelligence Demo")
st.markdown("""
This demo showcases a **multi-agent system** that aggregates technical signals from various developer sources,
scores them for relevance, identifies potential risks, and synthesizes a final intelligence digest.
""")
# Sidebar Configuration
st.sidebar.header("⚙️ Pipeline Configuration")
# API Key
api_key = st.sidebar.text_input("Gemini API Key (optional)", type="password", help="Provide a Google Gemini API key. If not provided, agents will use fallback heuristic logic.")
if api_key:
# Agno's GoogleGemini looks for GOOGLE_API_KEY
os.environ["GOOGLE_API_KEY"] = api_key
# Source Selection
sources = st.sidebar.multiselect(
"Signal Sources",
["GitHub", "ArXiv", "HackerNews", "Medium", "HuggingFace"],
default=["GitHub", "ArXiv", "HackerNews", "Medium", "HuggingFace"]
)
# Signal Count Slider
signal_count = st.sidebar.slider(
"Signals per source",
min_value=4,
max_value=32,
value=DEFAULT_SIGNAL_LIMIT,
step=4
)
run_button = st.sidebar.button("🚀 Run Intelligence Pipeline", use_container_width=True)
# Main Area Logic
if run_button:
if not sources:
st.warning("Please select at least one signal source.")
else:
# Initialize Agents
collector = SignalCollectorAgent()
relevance = RelevanceAgent()
risk = RiskAgent()
synthesis = SynthesisAgent()
# Step 1: Collection
with st.status("📡 Collecting and normalizing signals...", expanded=True) as status:
st.write("Fetching raw data from sources...")
# Map selected sources to fetch calls (simplified reuse)
# We use the collect_signals logic but filter by selected sources
raw_signals = []
from adapters.github import fetch_github_trending
from adapters.arxiv import fetch_arxiv_papers
from adapters.hackernews import fetch_hackernews_stories
from adapters.medium import fetch_medium_blogs
from adapters.huggingface import fetch_huggingface_models
if "GitHub" in sources:
st.write("Fetching GitHub trending...")
raw_signals.extend(fetch_github_trending(limit=signal_count))
if "ArXiv" in sources:
st.write("Fetching ArXiv papers...")
raw_signals.extend(fetch_arxiv_papers(limit=signal_count))
if "HackerNews" in sources:
st.write("Fetching HackerNews stories...")
raw_signals.extend(fetch_hackernews_stories(limit=signal_count))
if "Medium" in sources:
st.write("Fetching Medium blogs...")
raw_signals.extend(fetch_medium_blogs(limit=min(signal_count, 3)))
if "HuggingFace" in sources:
st.write("Fetching HuggingFace models...")
raw_signals.extend(fetch_huggingface_models(limit=signal_count))
st.write(f"Normalizing {len(raw_signals)} raw signals...")
normalized = collector.collect(raw_signals)
status.update(label=f"✅ {len(normalized)} unique signals collected", state="complete")
# Step 2: Analysis
col1, col2 = st.columns(2)
with col1:
with st.status("📊 Scoring Relevance...") as status:
scored = relevance.score_batch(normalized)
status.update(label="✅ Relevance scoring complete", state="complete")
with col2:
with st.status("⚠️ Assessing Security Risks...") as status:
assessed = risk.assess_batch(scored)
status.update(label="✅ Risk assessment complete", state="complete")
# Step 3: Synthesis
with st.status("📋 Generating Intelligence Digest...") as status:
digest = synthesis.synthesize(assessed)
status.update(label="✅ Final synthesis complete", state="complete")
# Display Results
st.divider()
st.header("📄 Intelligence Digest")
# Executive Summary
st.info(f"**Executive Summary:** {digest['executive_summary']}")
# Recommendations
st.subheader("💡 Recommendations")
for rec in digest['recommendations']:
st.write(f"• {rec}")
st.divider()
st.subheader("🎯 Priority Signals")
# Display signals in expandable sections
for signal in assessed:
rel = signal.get("relevance", {})
risk_info = signal.get("risk", {})
risk_level = risk_info.get("risk_level", "UNKNOWN")
with st.expander(f"[{signal['source'].upper()}] {signal['title']}"):
col_a, col_b = st.columns([3, 1])
with col_a:
st.write(f"**Description:** {signal['description']}")
st.write(f"**URL:** [{signal['url']}]({signal['url']})")
if risk_info.get("concerns"):
st.markdown("**Security Concerns:**")
for concern in risk_info["concerns"]:
st.write(f"- {concern}")
with col_b:
st.markdown("<div style='text-align: center'>", unsafe_allow_html=True)
st.markdown(f"<div class='relevance-score'>{rel.get('score', 0)}</div>", unsafe_allow_html=True)
st.markdown("<small>RELEVANCE</small>", unsafe_allow_html=True)
risk_class = f"risk-{risk_level.lower()}"
st.markdown(f"<div class='badge {risk_class}' style='margin-top:10px'>{risk_level} RISK</div>", unsafe_allow_html=True)
st.markdown("</div>", unsafe_allow_html=True)
if rel.get("reasoning"):
st.caption(f"Reason: {rel['reasoning']}")
else:
# Landing state
st.image("https://raw.githubusercontent.com/Shubhamsaboo/awesome-llm-apps/main/advanced_ai_agents/multi_agent_apps/devpulse_ai/assets/logo.png", width=200) # Placeholder for logo logic
st.info("👈 Use the sidebar to configure the pipeline and click 'Run' to begin.")
# Educational Section
with st.expander("🛠️ How it works", expanded=True):
st.markdown("""
1. **Collector Agent**: Gathers data from GitHub, ArXiv, HN, Medium, and HuggingFace.
2. **Relevance Agent**: LLM analysis to score each signal for developer impact.
3. **Risk Agent**: Scans for breaking changes, vulnerabilities, or deprecations.
4. **Synthesis Agent**: Combines all findings into an actionable report.
""")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/streamlit_app.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ag2_adaptive_research_team/agents.py | from __future__ import annotations
from typing import Any, Dict, Optional
from autogen import AssistantAgent
DEFAULT_MODEL = "gpt-5-nano"


def make_llm_config(api_key: str, model: str = DEFAULT_MODEL, temperature: float = 0.2) -> Dict[str, Any]:
    """Return an AG2 ``llm_config`` mapping for a single OpenAI endpoint.

    Args:
        api_key: OpenAI API key forwarded to the client.
        model: Model identifier; defaults to the team-wide DEFAULT_MODEL.
        temperature: Sampling temperature shared by every agent using this config.
    """
    endpoint = {
        "api_type": "openai",
        "model": model,
        "api_key": api_key,
    }
    return {"config_list": [endpoint], "temperature": temperature}
def build_agents(api_key: str, model: str = DEFAULT_MODEL) -> Dict[str, AssistantAgent]:
    """Create the five-member AG2 research team, keyed by pipeline role.

    All agents share one llm_config; only the name and system message differ.
    """
    shared_config = make_llm_config(api_key=api_key, model=model)
    # role -> (agent name, system message)
    specs = {
        "triage": (
            "triage_agent",
            "You are a triage agent for a research team. "
            "Classify whether the question can be answered from local documents or needs web research. "
            "Respond ONLY with JSON.",
        ),
        "local": (
            "local_research_agent",
            "You are a local research agent. Use only the provided document excerpts. "
            "Return JSON with evidence and a draft answer.",
        ),
        "web": (
            "web_research_agent",
            "You are a web research agent. Use the provided web search results only. "
            "Return JSON with evidence and a draft answer.",
        ),
        "verifier": (
            "verifier_agent",
            "You are a verifier. Check evidence sufficiency and identify gaps. "
            "Return JSON verdict and gaps.",
        ),
        "synthesizer": (
            "synthesizer_agent",
            "You are the final synthesizer. Produce a clear answer with citations to the evidence.",
        ),
    }
    return {
        role: AssistantAgent(name=agent_name, llm_config=shared_config, system_message=message)
        for role, (agent_name, message) in specs.items()
    }
def run_agent(agent: AssistantAgent, prompt: str) -> str:
    """Send *prompt* to *agent* as a single user message and return plain text.

    generate_reply may return a dict or a string; both collapse to ``str``,
    with missing/None content mapped to the empty string.
    """
    reply = agent.generate_reply(messages=[{"role": "user", "content": prompt}])
    if not isinstance(reply, dict):
        return str(reply)
    return reply.get("content", "") or ""
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ag2_adaptive_research_team/agents.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ag2_adaptive_research_team/app.py | import os
import streamlit as st
from agents import DEFAULT_MODEL
from router import run_pipeline
from tools import build_local_index, load_documents
# Public SearxNG instance used for the optional web-research fallback.
SEARXNG_BASE_URL = "https://searxng.site/search"
# Page chrome.
st.set_page_config(page_title="AG2 Adaptive Research Team", layout="wide")
st.title("AG2 Adaptive Research Team")
st.caption("Agent teamwork + agent-enabled routing, built with AG2")
# Sidebar: credentials, model choice, and web-fallback toggle.
with st.sidebar:
    st.header("API Configuration")
    api_key = st.text_input("OpenAI API Key", type="password")
    model = st.text_input("Model", value=DEFAULT_MODEL)
    web_enabled = st.toggle("Enable Web Fallback", value=True)
    st.markdown(
        "Web fallback uses a public SearxNG instance, which may be rate-limited."
    )
# Main panel inputs.
st.subheader("1. Upload Local Documents")
files = st.file_uploader(
    "Upload PDFs or text files",
    type=["pdf", "txt", "md"],
    accept_multiple_files=True,
)
st.subheader("2. Ask a Question")
question = st.text_area("Research question")
run_clicked = st.button("Run Research")
if run_clicked:
    # Validate inputs before doing any work.
    if not api_key:
        st.error("Please provide your OpenAI API key.")
        st.stop()
    if not question.strip():
        st.error("Please enter a research question.")
        st.stop()
    # Also expose the key via the environment for libraries that read it there.
    os.environ["OPENAI_API_KEY"] = api_key
    # Chunk whatever the user uploaded into a searchable index (may be empty).
    documents = load_documents(files or [])
    local_index = build_local_index(documents)
    with st.spinner("Running the AG2 team..."):
        result = run_pipeline(
            question=question,
            local_chunks=local_index,
            api_key=api_key,
            model=model,
            web_enabled=web_enabled,
            searxng_base_url=SEARXNG_BASE_URL,
        )
    # Render each pipeline stage's output.
    st.subheader("Routing Decision")
    st.json(result.get("triage", {}))
    st.subheader("Evidence")
    st.json(result.get("evidence", []))
    st.subheader("Verifier")
    st.json(result.get("verifier", {}))
    st.subheader("Final Answer")
    st.markdown(result.get("final_answer", ""))
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ag2_adaptive_research_team/app.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ag2_adaptive_research_team/router.py | from __future__ import annotations
import json
import re
from typing import Any, Dict, List
from agents import build_agents, run_agent
from tools import Chunk, run_searxng, search_local
def _extract_json(text: str) -> Dict[str, Any]:
match = re.search(r"\{.*\}", text, re.DOTALL)
if not match:
return {}
try:
return json.loads(match.group(0))
except json.JSONDecodeError:
return {}
def _summarize_chunks(chunks: List[Chunk]) -> str:
lines = []
for chunk in chunks:
snippet = chunk.text[:300].strip()
lines.append(f"- {chunk.doc_name} [chunk {chunk.chunk_id}]: {snippet}")
return "\n".join(lines)
def run_pipeline(
    question: str,
    local_chunks: List[Chunk],
    api_key: str,
    model: str,
    web_enabled: bool,
    searxng_base_url: str,
) -> Dict[str, Any]:
    """Run the triage -> research -> verify -> synthesize agent pipeline.

    Args:
        question: The user's research question.
        local_chunks: Pre-built index of local document chunks (may be empty).
        api_key: OpenAI API key forwarded to agent construction.
        model: Model id used by every agent in the team.
        web_enabled: Whether the SearxNG web fallback may be used.
        searxng_base_url: Base URL of the SearxNG instance to query.

    Returns:
        Dict with keys "route", "triage", "evidence", "verifier", and
        "final_answer" (the synthesizer's free-text reply).
    """
    agents = build_agents(api_key=api_key, model=model)
    # Give the triage agent a one-line picture of what local material exists.
    doc_summary = "No local documents provided."
    if local_chunks:
        doc_names = sorted({chunk.doc_name for chunk in local_chunks})
        doc_summary = f"Local docs: {', '.join(doc_names)} (total chunks: {len(local_chunks)})"
    triage_prompt = f"""
Question: {question}
{doc_summary}
Decide the best route. Output JSON with keys:
- route: "local" or "web"
- confidence: number 0 to 1
- rationale: short string
"""
    triage_raw = run_agent(agents["triage"], triage_prompt)
    triage = _extract_json(triage_raw)
    # Default to the local route when the triage reply is unparseable.
    route = triage.get("route", "local")
    # Override: with no local material the local route cannot succeed.
    if not local_chunks and web_enabled:
        route = "web"
    evidence: List[Dict[str, Any]] = []
    draft_answer = ""
    if route == "web" and web_enabled:
        search_results = run_searxng(question, base_url=searxng_base_url, max_results=5)
        formatted_results = "\n".join(
            [
                f"- {item.get('title', 'Untitled')} | {item.get('link', '')} | {item.get('snippet', '')}"
                for item in search_results
            ]
        )
        web_prompt = f"""
Question: {question}
Web results:
{formatted_results}
Return JSON with keys:
- evidence: list of {{source, summary}}
- draft_answer: string
"""
        web_raw = run_agent(agents["web"], web_prompt)
        web_json = _extract_json(web_raw)
        evidence = web_json.get("evidence", [])
        draft_answer = web_json.get("draft_answer", "")
    else:
        # Local route — also the fallback when the web is disabled.
        hits = search_local(question, local_chunks, top_k=5)
        formatted_hits = _summarize_chunks(hits)
        local_prompt = f"""
Question: {question}
Document excerpts:
{formatted_hits}
Return JSON with keys:
- evidence: list of {{source, summary}}
- draft_answer: string
"""
        local_raw = run_agent(agents["local"], local_prompt)
        local_json = _extract_json(local_raw)
        evidence = local_json.get("evidence", [])
        draft_answer = local_json.get("draft_answer", "")
    # Independent sufficiency check on the drafted answer.
    verifier_prompt = f"""
Question: {question}
Draft answer:
{draft_answer}
Evidence:
{json.dumps(evidence, indent=2)}
Return JSON with keys:
- verdict: "sufficient" or "insufficient"
- gaps: list of short strings
"""
    verifier_raw = run_agent(agents["verifier"], verifier_prompt)
    verifier = _extract_json(verifier_raw)
    # Final synthesis sees the draft, the evidence, and the verifier verdict.
    synth_prompt = f"""
Question: {question}
Draft answer:
{draft_answer}
Evidence:
{json.dumps(evidence, indent=2)}
Verifier verdict:
{json.dumps(verifier, indent=2)}
Provide the final answer with clear citations to the evidence sources.
"""
    final_answer = run_agent(agents["synthesizer"], synth_prompt)
    return {
        "route": route,
        "triage": triage,
        "evidence": evidence,
        "verifier": verifier,
        "final_answer": final_answer,
    }
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ag2_adaptive_research_team/router.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ag2_adaptive_research_team/tools.py | from __future__ import annotations
import re
from dataclasses import dataclass
from typing import Iterable, List
from pypdf import PdfReader
from autogen.tools.experimental import SearxngSearchTool
@dataclass
class Document:
    """A single uploaded file reduced to cleaned plain text."""

    name: str  # original filename as uploaded
    text: str  # whitespace-normalized full text
@dataclass
class Chunk:
    """One retrievable slice of a document, addressed by (doc_name, chunk_id)."""

    doc_name: str  # name of the source Document
    chunk_id: int  # 1-based position of this chunk within its document
    text: str  # chunk contents (word window over the document text)
def _clean_text(text: str) -> str:
return re.sub(r"\s+", " ", text).strip()
def _tokenize(text: str) -> List[str]:
return re.findall(r"[a-zA-Z0-9]+", text.lower())
def load_documents(uploaded_files: Iterable) -> List[Document]:
    """Convert uploaded file objects into cleaned ``Document`` records.

    PDF uploads are read page-by-page via pypdf; anything else is treated as
    text (UTF-8 with a Latin-1 fallback). Files yielding no text are dropped.
    """
    loaded: List[Document] = []
    for upload in uploaded_files:
        filename = upload.name
        if filename.lower().endswith(".pdf"):
            pages = [page.extract_text() or "" for page in PdfReader(upload).pages]
            content = _clean_text("\n".join(pages))
        else:
            payload = upload.read()
            try:
                decoded = payload.decode("utf-8")
            except UnicodeDecodeError:
                decoded = payload.decode("latin-1")
            content = _clean_text(decoded)
        if content:
            loaded.append(Document(name=filename, text=content))
    return loaded
def chunk_text(text: str, chunk_size: int = 800, overlap: int = 120) -> List[str]:
    """Split *text* into overlapping word-window chunks.

    Args:
        text: Source text; split on whitespace.
        chunk_size: Maximum number of words per chunk.
        overlap: Number of words shared between consecutive chunks.

    Returns:
        List of chunk strings (empty for empty/whitespace-only input).

    Note:
        The window is guaranteed to advance even when ``overlap`` is not
        smaller than ``chunk_size``; the previous computation
        ``start = max(0, end - overlap)`` could loop forever in that case.
        Output is unchanged for the normal ``overlap < chunk_size`` case.
    """
    words = text.split()
    if not words:
        return []
    chunks: List[str] = []
    start = 0
    while start < len(words):
        end = min(len(words), start + chunk_size)
        chunks.append(" ".join(words[start:end]))
        if end == len(words):
            break
        # Step back by `overlap` words, but always move forward by >= 1.
        start = max(start + 1, end - overlap)
    return chunks
def build_local_index(documents: List[Document]) -> List[Chunk]:
    """Chunk every document and flatten the results into one searchable index."""
    return [
        Chunk(doc_name=doc.name, chunk_id=idx, text=piece)
        for doc in documents
        for idx, piece in enumerate(chunk_text(doc.text), start=1)
    ]
def search_local(query: str, index: List[Chunk], top_k: int = 5) -> List[Chunk]:
    """Rank chunks by shared-token count with *query* and return the best few.

    Chunks with zero token overlap are excluded entirely; ties keep their
    original index order (stable sort).
    """
    wanted = set(_tokenize(query))
    ranked = []
    for candidate in index:
        score = len(wanted.intersection(_tokenize(candidate.text)))
        if score:
            ranked.append((score, candidate))
    ranked.sort(key=lambda pair: pair[0], reverse=True)
    return [candidate for _, candidate in ranked[:top_k]]
def run_searxng(query: str, base_url: str, max_results: int = 5) -> List[dict]:
    """Query a SearxNG instance and normalize the tool output to a list.

    Returns [] when the tool yields anything other than a list or a dict
    carrying a "results" key.
    """
    searcher = SearxngSearchTool(base_url=base_url)
    raw = searcher(query=query, max_results=max_results)
    if isinstance(raw, list):
        return raw
    if isinstance(raw, dict):
        return raw.get("results", [])
    return []
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ag2_adaptive_research_team/tools.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/agent.py | """
AI Negotiation Battle Simulator - Backend Agent
This module creates an ADK agent wrapped with AG-UI middleware for
real-time negotiation between AI buyer and seller agents.
"""
import os
from typing import Optional
from dotenv import load_dotenv
from google.adk.agents import LlmAgent
from google.adk.tools import FunctionTool, ToolContext
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from ag_ui_adk import ADKAgent, add_adk_fastapi_endpoint
from config.scenarios import SCENARIOS, get_scenario
from config.personalities import (
BUYER_PERSONALITIES,
SELLER_PERSONALITIES,
get_personality_prompt
)
# Load environment variables
load_dotenv()
# ============================================================================
# NEGOTIATION STATE (shared between tools)
# ============================================================================
class NegotiationState:
    """Mutable record of one negotiation: configuration, history, outcome.

    A fresh instance is indistinguishable from a reset one, since
    ``__init__`` simply delegates to ``reset``.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Restore all defaults for a brand-new negotiation."""
        # Lifecycle: setup -> ready -> negotiating -> deal / no_deal
        self.status = "setup"
        self.rounds = []
        self.current_round = 0
        self.final_price = None
        # Default match-up
        self.scenario_id = "craigslist_civic"
        self.buyer_personality = "cool_hand_casey"
        self.seller_personality = "by_the_book_beth"
        # Default pricing; overwritten when a scenario is configured
        self.asking_price = 15500
        self.buyer_budget = 13000
        self.seller_minimum = 12500
# Module-level singleton shared (read and mutated) by every tool function
# below. NOTE(review): a single global means concurrent sessions would share
# one negotiation — confirm single-user deployment is intended.
negotiation_state = NegotiationState()
# ============================================================================
# NEGOTIATION TOOLS
# ============================================================================
def configure_negotiation(
    scenario_id: str = "craigslist_civic",
    buyer_personality: str = "cool_hand_casey",
    seller_personality: str = "by_the_book_beth",
    tool_context: Optional[ToolContext] = None
) -> dict:
    """
    Configure the negotiation scenario and personalities.

    Args:
        scenario_id: The scenario to use (craigslist_civic, vintage_guitar, apartment_sublet)
        buyer_personality: Buyer's personality (desperate_dan, analytical_alex, cool_hand_casey, fair_deal_fran)
        seller_personality: Seller's personality (shark_steve, by_the_book_beth, motivated_mike, drama_queen_diana)

    Returns:
        Configuration summary with scenario, buyer, and seller details.
    """
    scenario = get_scenario(scenario_id)

    # Wipe any previous negotiation, then record the chosen match-up and
    # the scenario's pricing envelope on the shared state.
    negotiation_state.reset()
    negotiation_state.scenario_id = scenario_id
    negotiation_state.buyer_personality = buyer_personality
    negotiation_state.seller_personality = seller_personality
    negotiation_state.asking_price = scenario["asking_price"]
    negotiation_state.buyer_budget = scenario["buyer_budget"]
    negotiation_state.seller_minimum = scenario["seller_minimum"]
    negotiation_state.status = "ready"

    buyer_profile = BUYER_PERSONALITIES[buyer_personality]
    seller_profile = SELLER_PERSONALITIES[seller_personality]
    return {
        "status": "configured",
        "scenario": {
            "title": scenario["title"],
            "item": scenario["item"]["name"],
            "asking_price": scenario["asking_price"],
            "fair_market_value": scenario["fair_market_value"]
        },
        "buyer": {
            "name": buyer_profile["name"],
            "emoji": buyer_profile["emoji"],
            "budget": scenario["buyer_budget"]
        },
        "seller": {
            "name": seller_profile["name"],
            "emoji": seller_profile["emoji"],
            "minimum": scenario["seller_minimum"]
        }
    }
def start_negotiation(tool_context: Optional[ToolContext] = None) -> dict:
    """
    Kick off a configured negotiation battle.

    Returns:
        Round-1 opening info, or an error dict when configuration is missing.
    """
    if negotiation_state.status != "ready":
        return {"error": "Please configure the negotiation first using configure_negotiation"}
    scenario = get_scenario(negotiation_state.scenario_id)
    # Transition into round 1 of active negotiation.
    negotiation_state.status = "negotiating"
    negotiation_state.current_round = 1
    item_name = scenario["item"]["name"]
    return {
        "status": "started",
        "round": 1,
        "scenario": scenario["title"],
        "item": item_name,
        "asking_price": scenario["asking_price"],
        "message": f"🔔 NEGOTIATION BEGINS! The battle for the {item_name} is ON!"
    }
def buyer_make_offer(
    offer_amount: int,
    message: str,
    reasoning: str = "",
    tool_context: Optional[ToolContext] = None
) -> dict:
    """
    Record a buyer offer in the running negotiation.

    Args:
        offer_amount: The dollar amount being offered
        message: What the buyer says to the seller
        reasoning: Internal reasoning for this offer

    Returns:
        Offer summary (including percent of asking price), or an error dict
        when no negotiation is in progress.
    """
    if negotiation_state.status != "negotiating":
        return {"error": "Negotiation not in progress"}
    scenario = get_scenario(negotiation_state.scenario_id)
    persona = BUYER_PERSONALITIES[negotiation_state.buyer_personality]
    # Append this offer to the shared round-by-round history.
    negotiation_state.rounds.append({
        "round": negotiation_state.current_round,
        "type": "buyer_offer",
        "offer_amount": offer_amount,
        "message": message,
        "reasoning": reasoning,
        "buyer_name": persona["name"],
        "buyer_emoji": persona["emoji"]
    })
    return {
        "status": "offer_made",
        "round": negotiation_state.current_round,
        "offer": offer_amount,
        "message": message,
        "buyer": f"{persona['emoji']} {persona['name']}",
        "percent_of_asking": round(offer_amount / scenario["asking_price"] * 100, 1)
    }
def seller_respond(
    action: str,
    counter_amount: Optional[int] = None,
    message: str = "",
    reasoning: str = "",
    tool_context: Optional[ToolContext] = None
) -> dict:
    """
    Seller responds to the buyer's offer.

    Args:
        action: One of 'accept', 'counter', 'reject', 'walk'
        counter_amount: If countering, the counter offer amount
        message: What the seller says to the buyer
        reasoning: Internal reasoning for this decision

    Returns:
        Response details and updated negotiation status. On 'accept' the
        result includes the final price and savings stats; 'walk' ends the
        negotiation with no deal; 'counter' and 'reject' advance the round.
    """
    if negotiation_state.status != "negotiating":
        return {"error": "Negotiation not in progress"}

    # Find the most recent buyer offer — needed to price an acceptance.
    last_offer = None
    for entry in reversed(negotiation_state.rounds):
        if entry["type"] == "buyer_offer":
            last_offer = entry["offer_amount"]
            break
    # Bug fix: accepting before any buyer offer used to crash below when
    # computing savings (asking_price - None). Validate before mutating the
    # round history so an invalid accept leaves no trace.
    if action == "accept" and last_offer is None:
        return {"error": "Cannot accept: the buyer has not made an offer yet"}

    scenario = get_scenario(negotiation_state.scenario_id)
    seller_p = SELLER_PERSONALITIES[negotiation_state.seller_personality]
    negotiation_state.rounds.append({
        "round": negotiation_state.current_round,
        "type": "seller_response",
        "action": action,
        "counter_amount": counter_amount,
        "message": message,
        "reasoning": reasoning,
        "seller_name": seller_p["name"],
        "seller_emoji": seller_p["emoji"]
    })

    result = {
        "status": "response_given",
        "round": negotiation_state.current_round,
        "action": action,
        "message": message,
        "seller": f"{seller_p['emoji']} {seller_p['name']}"
    }
    if action == "accept":
        # Deal closes at the buyer's last offered price.
        negotiation_state.status = "deal"
        negotiation_state.final_price = last_offer
        asking = scenario["asking_price"]
        result["outcome"] = "DEAL"
        result["final_price"] = last_offer
        result["savings"] = asking - last_offer
        result["percent_off"] = round((asking - last_offer) / asking * 100, 1)
    elif action == "walk":
        negotiation_state.status = "no_deal"
        result["outcome"] = "SELLER_WALKED"
    elif action == "counter":
        result["counter_amount"] = counter_amount
        negotiation_state.current_round += 1
    else:  # reject
        negotiation_state.current_round += 1
    return result
def get_negotiation_state(tool_context: Optional[ToolContext] = None) -> dict:
    """
    Snapshot the negotiation: configuration, participants, history, outcome.

    Returns:
        Full negotiation state including the round-by-round history.
    """
    scenario = get_scenario(negotiation_state.scenario_id)
    buyer_profile = BUYER_PERSONALITIES[negotiation_state.buyer_personality]
    seller_profile = SELLER_PERSONALITIES[negotiation_state.seller_personality]
    return {
        "status": negotiation_state.status,
        "scenario": scenario["title"],
        "item": scenario["item"]["name"],
        "asking_price": scenario["asking_price"],
        "current_round": negotiation_state.current_round,
        "buyer": {
            "name": buyer_profile["name"],
            "emoji": buyer_profile["emoji"],
            "budget": negotiation_state.buyer_budget
        },
        "seller": {
            "name": seller_profile["name"],
            "emoji": seller_profile["emoji"],
            "minimum": negotiation_state.seller_minimum
        },
        "rounds": negotiation_state.rounds,
        "final_price": negotiation_state.final_price
    }
def get_available_scenarios(tool_context: Optional[ToolContext] = None) -> dict:
    """
    List every negotiation scenario the simulator knows about.

    Returns:
        {"scenarios": [...]} with id, title, emoji, description, item name,
        and asking price for each entry.
    """
    return {
        "scenarios": [
            {
                "id": key,
                "title": cfg["title"],
                "emoji": cfg["emoji"],
                "description": cfg["description"],
                "item": cfg["item"]["name"],
                "asking_price": cfg["asking_price"]
            }
            for key, cfg in SCENARIOS.items()
        ]
    }
def get_available_personalities(tool_context: Optional[ToolContext] = None) -> dict:
    """
    List buyer and seller personality options.

    Returns:
        {"buyers": [...], "sellers": [...]} with id, name, emoji, and
        description for each personality.
    """
    def summarize(catalog: dict) -> list:
        # Buyer and seller entries share the same public shape.
        return [
            {
                "id": key,
                "name": p["name"],
                "emoji": p["emoji"],
                "description": p["description"]
            }
            for key, p in catalog.items()
        ]

    return {
        "buyers": summarize(BUYER_PERSONALITIES),
        "sellers": summarize(SELLER_PERSONALITIES),
    }
# ============================================================================
# ADK AGENT DEFINITION
# ============================================================================
# Single orchestrator agent: it role-plays BOTH buyer and seller by calling
# the tool functions defined above against the shared negotiation_state.
negotiation_agent = LlmAgent(
    name="NegotiationBattleAgent",
    model="gemini-3-flash-preview",
    description="AI Negotiation Battle Simulator - orchestrates dramatic negotiations between buyer and seller agents",
    instruction="""
You are the NEGOTIATION BATTLE MASTER! 🎮 You orchestrate epic negotiations between AI buyer and seller agents.
YOUR ROLE:
You manage a negotiation battle where a Buyer agent and a Seller agent negotiate over an item.
You play BOTH roles, switching between them to create a dramatic back-and-forth negotiation.
AVAILABLE TOOLS:
- get_available_scenarios: See what scenarios are available
- get_available_personalities: See buyer/seller personality options
- configure_negotiation: Set up the scenario and personalities
- start_negotiation: Begin the battle!
- buyer_make_offer: Make an offer as the buyer
- seller_respond: Respond as the seller (accept/counter/reject/walk)
- get_negotiation_state: Check current negotiation status
HOW TO RUN A NEGOTIATION:
1. First, use configure_negotiation to set up the scenario and personalities
2. Use start_negotiation to begin
3. Alternate between buyer_make_offer and seller_respond
4. Play each role authentically based on their personality!
5. Continue until a deal is reached or someone walks away
PERSONALITY GUIDELINES:
- Each personality has distinct traits - embody them fully!
- Buyers have budgets they shouldn't exceed
- Sellers have minimums they won't go below
- Create DRAMA and tension in the negotiation
- Make the dialogue feel real and entertaining
IMPORTANT:
- When the user asks to start a negotiation, first show them the options
- Let them pick scenario and personalities, or use defaults
- Once configured, run the full negotiation automatically
- Provide colorful commentary between rounds
- Celebrate deals and mourn walkways dramatically!
Be entertaining, dramatic, and make this feel like a real negotiation showdown! 🎭
""",
    # Tool order mirrors the intended flow: discover options, configure,
    # start, then alternate offer/response while polling state.
    tools=[
        FunctionTool(get_available_scenarios),
        FunctionTool(get_available_personalities),
        FunctionTool(configure_negotiation),
        FunctionTool(start_negotiation),
        FunctionTool(buyer_make_offer),
        FunctionTool(seller_respond),
        FunctionTool(get_negotiation_state),
    ]
)
# ============================================================================
# AG-UI + FASTAPI SETUP
# ============================================================================
# Wrap the ADK agent in AG-UI middleware so a frontend can talk to it.
adk_negotiation_agent = ADKAgent(
    adk_agent=negotiation_agent,
    app_name="negotiation_battle",
    user_id="battle_user",
    session_timeout_seconds=3600,  # idle sessions expire after one hour
    use_in_memory_services=True  # keep session services in memory (no external store)
)
# Create FastAPI app
app = FastAPI(
    title="AI Negotiation Battle Simulator",
    description="Watch AI agents battle it out in epic negotiations!",
    version="1.0.0"
)
# Allow the local frontend (served on port 3000) to call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000", "http://127.0.0.1:3000"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Mount the AG-UI protocol endpoint at the server root.
add_adk_fastapi_endpoint(app, adk_negotiation_agent, path="/")
# ============================================================================
# MAIN
# ============================================================================
if __name__ == "__main__":
    import uvicorn
    # Warn early: the Gemini-backed agent cannot respond without a key.
    if not os.getenv("GOOGLE_API_KEY"):
        print("⚠️ Warning: GOOGLE_API_KEY not set!")
        print(" Get your key from: https://aistudio.google.com/")
        print()
    # Port is configurable via the PORT env var (default 8000).
    port = int(os.getenv("PORT", 8000))
    print(f"🎮 AI Negotiation Battle Simulator starting on port {port}")
    print(f" AG-UI endpoint: http://localhost:{port}/")
    uvicorn.run(app, host="0.0.0.0", port=port)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/agent.py",
"license": "Apache License 2.0",
"lines": 364,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/agents/buyer_agent.py | """Buyer agent for the negotiation battle simulator."""
from google.adk.agents import LlmAgent
def create_buyer_agent(
    scenario: dict,
    personality_prompt: str = "",
    model: str = "gemini-3-flash-preview"
) -> LlmAgent:
    """Create a buyer agent configured for a specific negotiation scenario.

    Args:
        scenario: The negotiation scenario configuration. Expects keys
            'item', 'asking_price', 'buyer_budget', 'fair_market_value',
            'buyer_context', 'buyer_stakes', and 'buyer_secret'.
        personality_prompt: Optional personality traits to inject
        model: The LLM model to use

    Returns:
        Configured LlmAgent for the buyer role (instructed to reply in JSON)
    """
    item = scenario["item"]
    # NOTE: chr(10) is '\n' — used because f-string expressions could not
    # contain backslashes before Python 3.12.
    base_instruction = f"""You are a BUYER in a negotiation for a {item['name']}.
=== THE SITUATION ===
{scenario['buyer_context']}
=== WHAT YOU'RE BUYING ===
Item: {item['name']}
Asking Price: ${scenario['asking_price']:,}
Your Budget: ${scenario['buyer_budget']:,}
Fair Market Value: ~${scenario['fair_market_value']:,}
Positive aspects:
{chr(10).join(f' + {p}' for p in item.get('positives', []))}
Issues you can leverage:
{chr(10).join(f' - {i}' for i in item.get('issues', []))}
=== YOUR STAKES ===
{scenario['buyer_stakes']}
=== YOUR SECRET (influences your behavior, but never state directly) ===
{scenario['buyer_secret']}
=== NEGOTIATION RULES ===
1. NEVER exceed your budget of ${scenario['buyer_budget']:,} unless absolutely necessary
2. Start lower than you're willing to pay - leave room to negotiate
3. Use the item's issues to justify lower offers
4. Stay in character throughout
5. React authentically to the seller's counteroffers
6. Know when to walk away (you have other options)
=== YOUR GOAL ===
Get the {item['name']} for the best possible price, ideally under ${scenario['fair_market_value']:,}.
But you really want this item, so find the balance between value and closing the deal.
{personality_prompt}
=== RESPONSE FORMAT ===
When making an offer, respond with a JSON object like this:
{{
    "offer_amount": 12000,
    "message": "What you say to the seller - be in character, conversational!",
    "reasoning": "Brief internal reasoning for this offer",
    "confidence": 7,
    "willing_to_walk": false
}}
Always respond with valid JSON. Make your message sound natural and in-character!
"""
    return LlmAgent(
        name="buyer_agent",
        model=model,
        description=f"Buyer negotiating for {item['name']}",
        instruction=base_instruction,
    )
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/agents/buyer_agent.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/agents/seller_agent.py | """Seller agent for the negotiation battle simulator."""
from google.adk.agents import LlmAgent
def create_seller_agent(
    scenario: dict,
    personality_prompt: str = "",
    model: str = "gemini-3-flash-preview"
) -> LlmAgent:
    """Create a seller agent configured for a specific negotiation scenario.

    Args:
        scenario: The negotiation scenario configuration. Expects keys
            'item', 'asking_price', 'seller_minimum', 'fair_market_value',
            'seller_context', 'seller_stakes', and 'seller_secret'.
        personality_prompt: Optional personality traits to inject
        model: The LLM model to use

    Returns:
        Configured LlmAgent for the seller role (instructed to reply in JSON)
    """
    item = scenario["item"]
    # NOTE: chr(10) is '\n' — used because f-string expressions could not
    # contain backslashes before Python 3.12.
    base_instruction = f"""You are a SELLER in a negotiation for your {item['name']}.
=== THE SITUATION ===
{scenario['seller_context']}
=== WHAT YOU'RE SELLING ===
Item: {item['name']}
Your Asking Price: ${scenario['asking_price']:,}
Your Minimum (walk away below this): ${scenario['seller_minimum']:,}
Fair Market Value: ~${scenario['fair_market_value']:,}
Why it's worth the price:
{chr(10).join(f' + {p}' for p in item.get('positives', []))}
Issues you may need to address:
{chr(10).join(f' - {i}' for i in item.get('issues', []))}
=== YOUR STAKES ===
{scenario['seller_stakes']}
=== YOUR SECRET (influences your behavior, but never state directly) ===
{scenario['seller_secret']}
=== NEGOTIATION RULES ===
1. NEVER go below your minimum of ${scenario['seller_minimum']:,}
2. Start firm - you've priced it fairly
3. Counter lowball offers with smaller concessions
4. Highlight the positives to justify your price
5. Stay in character throughout
6. Create urgency when appropriate ("I have other interested buyers")
7. Know when to stand firm vs. when to close the deal
=== YOUR GOAL ===
Sell the {item['name']} for the best possible price, ideally at or above ${scenario['fair_market_value']:,}.
But you do need to sell, so find the balance between maximizing value and closing the deal.
{personality_prompt}
=== RESPONSE FORMAT ===
When responding to an offer, respond with a JSON object like this:
{{
    "action": "counter",
    "counter_amount": 14500,
    "message": "What you say to the buyer - be in character, conversational!",
    "reasoning": "Brief internal reasoning for this decision",
    "firmness": 7
}}
For "action", use one of:
- "accept" - You accept the offer (no counter_amount needed)
- "counter" - You make a counteroffer (include counter_amount)
- "reject" - You reject outright but don't walk away
- "walk" - You're done negotiating
Always respond with valid JSON. Make your message sound natural and in-character!
"""
    return LlmAgent(
        name="seller_agent",
        model=model,
        description=f"Seller of {item['name']}",
        instruction=base_instruction,
    )
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/agents/seller_agent.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/config/personalities.py | """Agent personality configurations for the negotiation simulator."""
from typing import TypedDict
class PersonalityConfig(TypedDict):
    """Schema for one agent personality entry (buyer or seller)."""
    name: str  # display name, e.g. "Shark Steve"
    emoji: str  # single emoji shown next to the name
    description: str  # one-line summary for pickers/UI
    traits: list[str]  # behavioral bullet points injected into the prompt
    opening_style: str  # how they open the negotiation
    concession_rate: str  # how quickly they give ground
    walkaway_threshold: str  # when they'll walk away
    secret_motivation: str  # hidden driver; never revealed directly in dialogue
# ============================================================================
# BUYER PERSONALITIES
# ============================================================================
# Buyer archetypes, keyed by personality id; consumed by get_personality_prompt
# to build the in-character prompt addition.
BUYER_PERSONALITIES: dict[str, PersonalityConfig] = {
    "desperate_dan": {
        "name": "Desperate Dan",
        "emoji": "😰",
        "description": "Needs the car TODAY. Terrible poker face.",
        "traits": [
            "Reveals too much about urgency",
            "Makes emotional appeals",
            "Caves quickly under pressure",
            "Genuinely nice but easily manipulated"
        ],
        "opening_style": "Start at 75% of asking, mention time pressure early",
        "concession_rate": "Fast - will increase offer by 5-8% each round",
        "walkaway_threshold": "Very high - will go up to 95% of budget before walking",
        "secret_motivation": "New job starts Monday, public transit is 2 hours each way"
    },
    "analytical_alex": {
        "name": "Analytical Alex",
        "emoji": "🧮",
        "description": "Cites every data point. Very logical, somewhat robotic.",
        "traits": [
            "Quotes KBB, Edmunds, and market data constantly",
            "Breaks down value into itemized components",
            "Unemotional, focused on fair market value",
            "Respects logic, immune to emotional manipulation"
        ],
        "opening_style": "Start at exactly market value minus depreciation factors",
        "concession_rate": "Slow and calculated - only moves when given new data",
        "walkaway_threshold": "Firm - walks if price exceeds data-backed value by 10%",
        "secret_motivation": "Has analyzed 47 similar listings, knows exact fair price"
    },
    "cool_hand_casey": {
        "name": "Cool-Hand Casey",
        "emoji": "😎",
        "description": "Master of the walkaway bluff. Ice in their veins.",
        "traits": [
            "Never shows eagerness, always seems ready to leave",
            "Uses strategic silence",
            "Mentions other options constantly",
            "Extremely patient, will wait out the seller"
        ],
        "opening_style": "Lowball at 65% of asking, seem indifferent",
        "concession_rate": "Glacial - small moves only after long pauses",
        "walkaway_threshold": "Will actually walk at fair value, not bluffing",
        "secret_motivation": "Has two backup cars lined up, genuinely doesn't care"
    },
    "fair_deal_fran": {
        "name": "Fair-Deal Fran",
        "emoji": "🤝",
        "description": "Just wants everyone to win. Seeks middle ground.",
        "traits": [
            "Proposes split-the-difference solutions",
            "Acknowledges seller's perspective",
            "Builds rapport before negotiating",
            "Values relationship over small dollar amounts"
        ],
        "opening_style": "Start at 85% of asking, explain reasoning kindly",
        "concession_rate": "Moderate - moves to meet in the middle",
        "walkaway_threshold": "Medium - walks if seller is unreasonable, not just expensive",
        "secret_motivation": "Believes in karma, wants seller to feel good about deal"
    }
}
# ============================================================================
# SELLER PERSONALITIES
# ============================================================================
# Seller archetypes, keyed by personality id; consumed by get_personality_prompt
# to build the in-character prompt addition.
SELLER_PERSONALITIES: dict[str, PersonalityConfig] = {
    "shark_steve": {
        "name": "Shark Steve",
        "emoji": "🦈",
        "description": "Never drops more than 5%. Take it or leave it attitude.",
        "traits": [
            "Creates artificial scarcity",
            "Never makes the first concession",
            "Uses high-pressure tactics",
            "Dismissive of lowball offers"
        ],
        "opening_style": "Price is firm, mentions multiple interested buyers",
        "concession_rate": "Minimal - 1-2% per round maximum",
        "walkaway_threshold": "Will pretend to walk to create urgency",
        "secret_motivation": "Actually has car payment due and needs to sell"
    },
    "by_the_book_beth": {
        "name": "By-The-Book Beth",
        "emoji": "📊",
        "description": "Goes strictly by KBB. Reasonable but firm.",
        "traits": [
            "References official valuations",
            "Provides documentation for pricing",
            "Fair but won't go below market value",
            "Responds well to logical arguments"
        ],
        "opening_style": "Asks KBB private party value, shows service records",
        "concession_rate": "Steady - will adjust based on condition factors",
        "walkaway_threshold": "Won't go below KBB fair condition price",
        "secret_motivation": "Has no rush, will wait for right buyer"
    },
    "motivated_mike": {
        "name": "Motivated Mike",
        "emoji": "😅",
        "description": "Really needs to sell. More flexible than he wants to be.",
        "traits": [
            "Mentions reasons for selling",
            "Open to creative deals",
            "Shows nervousness about timeline",
            "Accepts reasonable offers quickly"
        ],
        "opening_style": "Prices competitively, emphasizes quick sale",
        "concession_rate": "Fast - will drop 3-5% per round",
        "walkaway_threshold": "Low - very reluctant to lose a serious buyer",
        "secret_motivation": "Already bought the new car, paying two car payments"
    },
    "drama_queen_diana": {
        "name": "Drama Queen Diana",
        "emoji": "🎭",
        "description": "Everything is 'my final offer' (it's never final).",
        "traits": [
            "Theatrical reactions to offers",
            "Claims emotional attachment to car",
            "Uses guilt and stories",
            "Actually negotiable despite protests"
        ],
        "opening_style": "Tells the car's 'story', prices emotionally",
        "concession_rate": "Appears slow but actually moderate after drama",
        "walkaway_threshold": "Threatens to walk constantly but never does",
        "secret_motivation": "Car holds memories of ex, secretly wants it gone"
    }
}
def get_personality_prompt(role: str, personality_key: str) -> str:
    """Build the personality-flavored prompt snippet for a negotiation agent.

    Args:
        role: Either 'buyer' or 'seller'; selects which personality table to use.
        personality_key: Key identifying the personality within that table.

    Returns:
        A prompt fragment to append to the agent's base instructions, or an
        empty string when the key is unknown.
    """
    table = SELLER_PERSONALITIES if role != "buyer" else BUYER_PERSONALITIES
    profile = table.get(personality_key)
    if not profile:
        return ""
    # Pre-render the bullet list of traits (chr(10) is '\n').
    trait_lines = chr(10).join(f'- {trait}' for trait in profile['traits'])
    return f"""
YOUR PERSONALITY: {profile['emoji']} {profile['name']}
{profile['description']}
YOUR TRAITS:
{trait_lines}
NEGOTIATION STYLE:
- Opening Approach: {profile['opening_style']}
- How You Concede: {profile['concession_rate']}
- When You Walk Away: {profile['walkaway_threshold']}
SECRET (never reveal directly, but it influences your behavior):
{profile['secret_motivation']}
Stay in character! Your personality should come through in how you phrase offers,
react to counteroffers, and handle pressure.
"""
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/config/personalities.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/config/scenarios.py | """Negotiation scenario configurations."""
from typing import TypedDict
class ScenarioConfig(TypedDict):
    """Configuration for a negotiation scenario.

    Each scenario pairs one buyer and one seller around a single item,
    with asymmetric information (each side has a secret) and a twist that
    can surface mid-negotiation. All prices are whole-dollar amounts; the
    scenarios below keep buyer_budget above seller_minimum so a (sometimes
    narrow) zone of agreement exists.
    """
    id: str  # must match the key used in the SCENARIOS registry
    title: str  # display title of the scenario
    emoji: str
    description: str  # one-line summary
    # The item being negotiated
    item: dict # name, details, condition, etc.
    # Pricing
    asking_price: int  # seller's public listing price
    fair_market_value: int  # objective reference value
    buyer_budget: int  # buyer's (stated) maximum
    seller_minimum: int  # lowest price the seller will accept
    # Context
    buyer_context: str
    seller_context: str
    # Stakes
    buyer_stakes: str
    seller_stakes: str
    # Drama elements
    buyer_secret: str
    seller_secret: str
    twist: str # Something that could be revealed mid-negotiation
# ============================================================================
# SCENARIOS
# ============================================================================
SCENARIOS: dict[str, ScenarioConfig] = {
"craigslist_civic": {
"id": "craigslist_civic",
"title": "The Craigslist Showdown",
"emoji": "🚗",
"description": "A classic used car negotiation with secrets on both sides.",
"item": {
"name": "2019 Honda Civic EX",
"year": 2019,
"make": "Honda",
"model": "Civic EX",
"mileage": 45000,
"color": "Lunar Silver Metallic",
"condition": "Excellent",
"issues": ["Minor scratch on rear bumper", "Small chip in windshield"],
"positives": [
"Single owner",
"Full service records",
"New tires (6 months ago)",
"No accidents",
"Garage kept"
]
},
"asking_price": 15500,
"fair_market_value": 14000,
"buyer_budget": 13000,
"seller_minimum": 12500,
"buyer_context": """
You're a recent college graduate who just landed your first real job.
The commute is 25 miles each way, and public transit would take 2 hours.
You've saved up $13,000 over the past year, with a secret $500 emergency buffer.
You've been looking for 3 weeks and this is the best car you've seen.
""",
"seller_context": """
You bought this Civic new for $24,000 and it's been your daily driver.
You're upgrading to an SUV because your family is growing.
KBB says private party value is $14,000-$15,000 for excellent condition.
You've already put a deposit on the new car and want to close this sale.
""",
"buyer_stakes": "Job starts Monday. No car means either decline the job or brutal commute.",
"seller_stakes": "New SUV deposit is non-refundable. Need this money for the down payment.",
"buyer_secret": "You could technically go up to $13,500 using your emergency fund, but you really don't want to.",
"seller_secret": "The 'other interested buyer' you might mention? They're very flaky and probably won't show.",
"twist": "The seller's spouse mentioned at the test drive that they 'need this gone before the baby comes in 2 weeks.'"
},
"vintage_guitar": {
"id": "vintage_guitar",
"title": "The Vintage Axe",
"emoji": "🎸",
"description": "A musician hunts for their dream guitar at a local shop.",
"item": {
"name": "1978 Fender Stratocaster",
"year": 1978,
"make": "Fender",
"model": "Stratocaster",
"condition": "Very Good",
"issues": ["Some fret wear", "Non-original tuning pegs", "Case shows age"],
"positives": [
"All original electronics",
"Original pickups (legendary CBS-era)",
"Great neck feel",
"Authentic relic'd finish",
"Plays beautifully"
]
},
"asking_price": 8500,
"fair_market_value": 7500,
"buyer_budget": 7000,
"seller_minimum": 6500,
"buyer_context": """
You're a professional session musician who's been searching for a late-70s Strat
with the right feel. You've played through dozens and this one just speaks to you.
Your budget is $7,000 but this is The One.
""",
"seller_context": """
You run a vintage guitar shop. This Strat came from an estate sale where you
paid $4,500. It's been in the shop for 3 months and floor space is money.
You need at least $6,500 to keep margins healthy.
""",
"buyer_stakes": "You have a big studio session next week. The right guitar could define your sound.",
"seller_stakes": "Rent is due and you've got two more estate buys coming in that need funding.",
"buyer_secret": "You could stretch to $7,500 by selling your backup amp, but you'd really rather not.",
"seller_secret": "You've had this for 90 days and need to move inventory. Might take $6,200.",
"twist": "The buyer mentions they're recording with a famous producer who loves vintage gear."
},
"apartment_sublet": {
"id": "apartment_sublet",
"title": "The Sublet Standoff",
"emoji": "🏠",
"description": "Negotiating rent for a 3-month summer sublet in a hot market.",
"item": {
"name": "Studio Apartment Sublet",
"type": "Studio apartment",
"location": "Downtown, 5 min walk to transit",
"duration": "3 months (June-August)",
"condition": "Recently renovated",
"issues": ["Street noise", "No dishwasher", "Coin laundry"],
"positives": [
"Great location",
"In-unit washer/dryer",
"Rooftop access",
"Utilities included",
"Furnished"
]
},
"asking_price": 2200, # per month
"fair_market_value": 2000,
"buyer_budget": 1800,
"seller_minimum": 1700,
"buyer_context": """
You're a summer intern at a tech company. They're paying $5,000/month stipend.
Housing eats into that significantly. You need something walkable to the office.
""",
"seller_context": """
You're leaving for a 3-month work trip and need to cover rent while gone.
Your rent is $1,600/month. You're hoping to make a small profit or at least break even.
""",
"buyer_stakes": "Your internship starts in 2 weeks. You need housing locked down.",
"seller_stakes": "You leave in 10 days. Empty apartment means paying double rent.",
"buyer_secret": "Company will reimburse up to $2,000/month but you'd love to pocket the difference.",
"seller_secret": "You've had two other inquiries but they fell through. Getting nervous.",
"twist": "The sublet includes your neighbor's cat-sitting duties (easy, cat is chill)."
}
}
def get_scenario(scenario_id: str) -> ScenarioConfig:
    """Look up a scenario configuration by its identifier.

    Args:
        scenario_id: The scenario identifier

    Returns:
        The scenario configuration

    Raises:
        KeyError: If scenario not found
    """
    try:
        return SCENARIOS[scenario_id]
    except KeyError:
        # Re-raise with a message listing the valid choices.
        raise KeyError(
            f"Unknown scenario: {scenario_id}. Available: {list(SCENARIOS.keys())}"
        ) from None
def format_item_description(scenario: ScenarioConfig) -> str:
    """Format the item being negotiated into a readable description."""
    item = scenario["item"]
    parts = [
        f"**{item['name']}**",
        "",
        f"Condition: {item.get('condition', 'Good')}",
        "",
        "Positives:",
    ]
    parts.extend(f"  ✓ {point}" for point in item.get("positives", []))
    parts.append("")
    parts.append("Issues to Note:")
    parts.extend(f"  • {flaw}" for flaw in item.get("issues", []))
    return "\n".join(parts)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/ai_negotiation_battle_simulator/backend/config/scenarios.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:rag_tutorials/knowledge_graph_rag_citations/knowledge_graph_rag.py | """
Knowledge Graph RAG with Verifiable Citations
A Streamlit app demonstrating how Knowledge Graph-based RAG provides:
1. Multi-hop reasoning across documents
2. Verifiable source attribution for every claim
3. Transparent reasoning traces
This example uses Ollama for local LLM inference and Neo4j for the knowledge graph.
"""
import streamlit as st
import ollama
from ollama import Client as OllamaClient
from neo4j import GraphDatabase
from typing import List, Dict, Tuple
import re
import os
from dataclasses import dataclass
import json
import hashlib
# Configure Ollama host from environment (for Docker); falls back to the
# local daemon's default port when OLLAMA_HOST is unset.
OLLAMA_HOST = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
# Single shared client reused by every LLM call in this module.
ollama_client = OllamaClient(host=OLLAMA_HOST)
# ============================================================================
# Data Models
# ============================================================================
@dataclass
class Entity:
    """Represents an entity extracted from documents.

    The provenance fields (source_doc, source_chunk) are what make answers
    traceable back to the text the entity came from.
    """
    id: str  # short stable id (md5 of "name_sourcedoc", truncated to 12 hex chars)
    name: str  # surface name as extracted by the LLM
    entity_type: str  # e.g. PERSON, ORGANIZATION, CONCEPT, TECHNOLOGY, EVENT, LOCATION
    description: str  # brief LLM-written description grounded in the source text
    source_doc: str  # name of the document the entity was extracted from
    source_chunk: str  # first ~200 chars of the source text, used for citation display
@dataclass
class Relationship:
    """Represents a relationship between entities."""
    source: str  # source entity name (matched by name when inserted into the graph)
    target: str  # target entity name
    relation_type: str  # e.g. WORKS_FOR, CREATED, USES, LOCATED_IN
    description: str  # how the two entities relate, per the source text
    source_doc: str  # document the relationship was extracted from
@dataclass
class Citation:
    """Represents a verifiable citation for a claim."""
    claim: str  # label for the supported claim (currently "Reference [N]")
    source_document: str  # originating document name
    source_text: str  # excerpt of the source text backing the claim
    confidence: float  # 0-1; currently a fixed heuristic constant, not model-derived
    reasoning_path: List[str]  # graph-traversal steps that led to this source
@dataclass
class AnswerWithCitations:
    """Final answer with full attribution."""
    answer: str  # LLM answer text containing inline [N] citation markers
    citations: List[Citation]  # one entry per verified [N] reference in the answer
    reasoning_trace: List[str]  # ordered log of the retrieval/expansion steps taken
# ============================================================================
# Knowledge Graph Manager
# ============================================================================
class KnowledgeGraphManager:
    """Manages the Neo4j knowledge graph for RAG.

    Wraps a Neo4j driver with the small set of operations the app needs:
    upserting entities/relationships with provenance fields, and querying
    them for retrieval (text match plus multi-hop expansion).
    """

    def __init__(self, uri: str, user: str, password: str):
        """Open a driver connection; call close() when done."""
        self.driver = GraphDatabase.driver(uri, auth=(user, password))

    def close(self):
        """Close the underlying Neo4j driver."""
        self.driver.close()

    def clear_graph(self):
        """Clear all nodes and relationships."""
        with self.driver.session() as session:
            session.run("MATCH (n) DETACH DELETE n")

    def add_entity(self, entity: Entity):
        """Upsert an entity node (keyed by id) with its provenance fields."""
        with self.driver.session() as session:
            session.run(
                """
                MERGE (e:Entity {id: $id})
                SET e.name = $name,
                    e.type = $entity_type,
                    e.description = $description,
                    e.source_doc = $source_doc,
                    e.source_chunk = $source_chunk
                """,
                id=entity.id,
                name=entity.name,
                entity_type=entity.entity_type,
                description=entity.description,
                source_doc=entity.source_doc,
                source_chunk=entity.source_chunk
            )

    def add_relationship(self, rel: Relationship):
        """Upsert a RELATES_TO edge between two entities matched by name.

        Silently a no-op if either endpoint entity does not exist yet.
        """
        with self.driver.session() as session:
            session.run(
                """
                MATCH (a:Entity {name: $source})
                MATCH (b:Entity {name: $target})
                MERGE (a)-[r:RELATES_TO {type: $rel_type}]->(b)
                SET r.description = $description,
                    r.source_doc = $source_doc
                """,
                source=rel.source,
                target=rel.target,
                rel_type=rel.relation_type,
                description=rel.description,
                source_doc=rel.source_doc
            )

    def find_related_entities(self, entity_name: str, hops: int = 2) -> List[Dict]:
        """Find entities related within N hops, with full provenance.

        Cypher cannot parameterize variable-length path bounds, so `hops`
        must be interpolated into the query text. It is coerced to a
        positive int first so a malicious/buggy value cannot inject Cypher.

        Raises:
            ValueError: If hops is not a positive integer.
        """
        hops = int(hops)
        if hops < 1:
            raise ValueError(f"hops must be a positive integer, got {hops}")
        with self.driver.session() as session:
            result = session.run(
                f"""
                MATCH path = (start:Entity)-[*1..{hops}]-(related:Entity)
                WHERE toLower(start.name) CONTAINS toLower($name) OR toLower(start.description) CONTAINS toLower($name)
                RETURN related.name as name,
                       related.description as description,
                       related.source_doc as source,
                       related.source_chunk as chunk,
                       [r in relationships(path) | r.description] as path_descriptions
                LIMIT 20
                """,
                name=entity_name
            )
            return [dict(record) for record in result]

    def semantic_search(self, query: str) -> List[Dict]:
        """Search for relevant entities based on query.

        Simple case-insensitive text matching (in production, use vector
        embeddings). Made case-insensitive to match find_related_entities;
        previously lowercase queries silently missed capitalized entities.
        """
        with self.driver.session() as session:
            result = session.run(
                """
                MATCH (e:Entity)
                WHERE toLower(e.name) CONTAINS toLower($query)
                   OR toLower(e.description) CONTAINS toLower($query)
                RETURN e.name as name,
                       e.description as description,
                       e.source_doc as source,
                       e.source_chunk as chunk,
                       e.type as type
                LIMIT 10
                """,
                query=query
            )
            return [dict(record) for record in result]
# ============================================================================
# LLM-based Entity Extraction
# ============================================================================
def extract_entities_with_llm(text: str, source_doc: str, model: str = "llama3.2") -> Tuple[List[Entity], List[Relationship]]:
    """Use LLM to extract entities and relationships from text.

    Extraction is best-effort: any failure (model error, malformed JSON,
    missing keys) is reported as a Streamlit warning and ([], []) is returned.
    """
    extraction_prompt = f"""Analyze the following text and extract:
1. KEY ENTITIES (people, organizations, concepts, technologies, events)
2. RELATIONSHIPS between these entities
For each entity, provide:
- name: The entity name
- type: Category (PERSON, ORGANIZATION, CONCEPT, TECHNOLOGY, EVENT, LOCATION)
- description: Brief description based on the text
For each relationship, provide:
- source: Source entity name
- target: Target entity name
- type: Relationship type (e.g., WORKS_FOR, CREATED, USES, LOCATED_IN)
- description: Description of how they relate
TEXT:
{text}
Respond in JSON format:
{{
    "entities": [
        {{"name": "...", "type": "...", "description": "..."}}
    ],
    "relationships": [
        {{"source": "...", "target": "...", "type": "...", "description": "..."}}
    ]
}}
"""
    try:
        reply = ollama_client.chat(
            model=model,
            messages=[{"role": "user", "content": extraction_prompt}],
            format="json"
        )
        parsed = json.loads(reply['message']['content'])
        # Same provenance snippet for every entity from this chunk.
        chunk_preview = text[:200] + "..."
        entities = [
            Entity(
                # Stable id: hash of name + document, truncated for readability.
                id=hashlib.md5(f"{item['name']}_{source_doc}".encode()).hexdigest()[:12],
                name=item['name'],
                entity_type=item['type'],
                description=item['description'],
                source_doc=source_doc,
                source_chunk=chunk_preview
            )
            for item in parsed.get('entities', [])
        ]
        relationships = [
            Relationship(
                source=item['source'],
                target=item['target'],
                relation_type=item['type'],
                description=item['description'],
                source_doc=source_doc
            )
            for item in parsed.get('relationships', [])
        ]
        return entities, relationships
    except Exception as exc:
        st.warning(f"Entity extraction error: {exc}")
        return [], []
# ============================================================================
# Multi-hop RAG with Citations
# ============================================================================
def generate_answer_with_citations(
    query: str,
    graph: KnowledgeGraphManager,
    model: str = "llama3.2"
) -> AnswerWithCitations:
    """
    Generate an answer using multi-hop graph traversal with full citations.
    This is the core differentiator: every claim is traced back to source documents.

    Pipeline: text-match seed entities → 2-hop expansion from the top 3 seeds
    → numbered [N] context → LLM answer with inline [N] markers → map each
    marker back to its source entry. The reasoning_trace records every step
    in order so the UI can display an auditable trail.
    """
    reasoning_trace = []
    citations = []
    # Step 1: Initial semantic search
    reasoning_trace.append(f"🔍 Searching knowledge graph for: '{query}'")
    initial_results = graph.semantic_search(query)
    # Empty graph / no match: return early with the trace collected so far.
    if not initial_results:
        return AnswerWithCitations(
            answer="I couldn't find relevant information in the knowledge graph.",
            citations=[],
            reasoning_trace=reasoning_trace
        )
    reasoning_trace.append(f"📊 Found {len(initial_results)} initial entities")
    # Step 2: Multi-hop expansion — only the top 3 seed entities are expanded,
    # each up to 2 hops out, to bound context size.
    all_context = []
    for entity in initial_results[:3]:
        reasoning_trace.append(f"🔗 Expanding from entity: {entity['name']}")
        related = graph.find_related_entities(entity['name'], hops=2)
        for rel in related:
            all_context.append({
                "entity": rel['name'],
                "description": rel['description'],
                "source": rel['source'],
                "chunk": rel['chunk'],
                "path": rel.get('path_descriptions', [])
            })
            reasoning_trace.append(f"  → Found related: {rel['name']}")
    # Step 3: Build context with source tracking — each context line gets a
    # "[N]" key, and source_map lets us resolve those keys back to documents.
    context_parts = []
    source_map = {}
    for i, ctx in enumerate(all_context):
        source_key = f"[{i+1}]"
        context_parts.append(f"{source_key} {ctx['entity']}: {ctx['description']}")
        source_map[source_key] = {
            "document": ctx['source'],
            "text": ctx['chunk'],
            "entity": ctx['entity']
        }
    context_text = "\n".join(context_parts)
    reasoning_trace.append(f"📝 Built context from {len(context_parts)} sources")
    # Step 4: Generate answer with citation requirements
    answer_prompt = f"""Based on the following knowledge graph context, answer the question.
IMPORTANT: For each claim you make, cite the source using [N] notation.
CONTEXT:
{context_text}
QUESTION: {query}
Provide a comprehensive answer with inline citations [1], [2], etc. for each claim.
"""
    try:
        response = ollama_client.chat(
            model=model,
            messages=[{"role": "user", "content": answer_prompt}]
        )
        answer = response['message']['content']
        reasoning_trace.append("✅ Generated answer with citations")
        # Step 5: Extract and verify citations — every distinct [N] the model
        # emitted is looked up in source_map; unknown refs are dropped. Note:
        # set() deduplicates but does not preserve first-occurrence order, and
        # confidence is a fixed heuristic (0.85), not model-derived.
        citation_refs = re.findall(r'\[(\d+)\]', answer)
        for ref in set(citation_refs):
            key = f"[{ref}]"
            if key in source_map:
                src = source_map[key]
                citations.append(Citation(
                    claim=f"Reference {key}",
                    source_document=src['document'],
                    source_text=src['text'],
                    confidence=0.85,
                    reasoning_path=[f"Entity: {src['entity']}"]
                ))
        reasoning_trace.append(f"🔒 Verified {len(citations)} citations")
        return AnswerWithCitations(
            answer=answer,
            citations=citations,
            reasoning_trace=reasoning_trace
        )
    except Exception as e:
        # LLM failure: surface the error as the answer, keep the partial trace.
        return AnswerWithCitations(
            answer=f"Error generating answer: {e}",
            citations=[],
            reasoning_trace=reasoning_trace
        )
# ============================================================================
# Streamlit UI
# ============================================================================
def main():
    """Run the Streamlit app: three tabs for ingesting documents into the
    knowledge graph, asking cited questions, and inspecting/clearing the graph.

    Neo4j connection settings are taken from the sidebar on every action;
    a fresh KnowledgeGraphManager is opened and closed per button click.
    """
    st.set_page_config(
        page_title="Knowledge Graph RAG with Citations",
        page_icon="🔍",
        layout="wide"
    )
    st.title("🔍 Knowledge Graph RAG with Verifiable Citations")
    st.markdown("""
    This demo shows how **Knowledge Graph-based RAG** provides:
    - **Multi-hop reasoning** across connected information
    - **Verifiable source attribution** for every claim
    - **Transparent reasoning traces** you can audit
    Unlike traditional vector RAG, every answer is traceable to its source documents.
    """)
    # Sidebar configuration
    # NOTE(review): the Neo4j password default is hard-coded for local demo
    # convenience only — do not reuse in any shared deployment.
    st.sidebar.header("⚙️ Configuration")
    neo4j_uri = st.sidebar.text_input("Neo4j URI", "bolt://localhost:7687")
    neo4j_user = st.sidebar.text_input("Neo4j User", "neo4j")
    neo4j_password = st.sidebar.text_input("Neo4j Password", type="password", value="password")
    llm_model = st.sidebar.selectbox("LLM Model", ["llama3.2", "mistral", "phi3"])
    # Initialize session state (survives Streamlit reruns within a session)
    if 'graph_initialized' not in st.session_state:
        st.session_state.graph_initialized = False
        st.session_state.documents = []
    # Main content
    tab1, tab2, tab3 = st.tabs(["📄 Add Documents", "❓ Ask Questions", "🔬 View Graph"])
    with tab1:
        st.header("Step 1: Build Knowledge Graph from Documents")
        sample_docs = {
            "AI Research Paper": """
            GraphRAG is a technique developed by Microsoft Research that combines knowledge graphs
            with retrieval-augmented generation. Unlike traditional RAG which uses vector similarity,
            GraphRAG builds a structured knowledge graph from documents, enabling multi-hop reasoning.
            The technique was introduced by researchers including Darren Edge and Ha Trinh.
            GraphRAG excels at answering complex questions that require connecting information
            from multiple sources, such as "What are the relationships between different research projects?"
            """,
            "Company Report": """
            Acme Corp was founded in 2020 by Jane Smith and John Doe in San Francisco.
            The company develops AI-powered analytics tools for enterprise customers.
            Their flagship product, DataSense, uses machine learning to analyze business data.
            Jane Smith previously worked at Google as a senior engineer on the TensorFlow team.
            John Doe was a co-founder of StartupX, which was acquired by Microsoft in 2019.
            Acme Corp raised $50 million in Series B funding led by Sequoia Capital.
            """
        }
        doc_choice = st.selectbox("Choose sample document:", list(sample_docs.keys()))
        doc_text = st.text_area("Or paste your own document:", sample_docs[doc_choice], height=200)
        doc_name = st.text_input("Document name:", doc_choice)
        if st.button("🔨 Extract & Add to Knowledge Graph"):
            with st.spinner("Extracting entities and relationships..."):
                try:
                    # Extract with the LLM, then persist everything to Neo4j.
                    graph = KnowledgeGraphManager(neo4j_uri, neo4j_user, neo4j_password)
                    entities, relationships = extract_entities_with_llm(doc_text, doc_name, llm_model)
                    for entity in entities:
                        graph.add_entity(entity)
                    for rel in relationships:
                        graph.add_relationship(rel)
                    graph.close()
                    st.success(f"✅ Extracted {len(entities)} entities and {len(relationships)} relationships")
                    with st.expander("View Extracted Entities"):
                        for e in entities:
                            st.write(f"**{e.name}** ({e.entity_type}): {e.description}")
                    with st.expander("View Extracted Relationships"):
                        for r in relationships:
                            st.write(f"{r.source} --[{r.relation_type}]--> {r.target}: {r.description}")
                    st.session_state.graph_initialized = True
                    st.session_state.documents.append(doc_name)
                except Exception as e:
                    st.error(f"Error: {e}")
                    st.info("Make sure Neo4j is running and Ollama has the model pulled.")
    with tab2:
        st.header("Step 2: Ask Questions with Verifiable Answers")
        if not st.session_state.graph_initialized:
            st.warning("⚠️ Please add documents to the knowledge graph first.")
        else:
            st.info(f"📚 Knowledge graph contains documents: {', '.join(st.session_state.documents)}")
            query = st.text_input("Enter your question:", "What are the key concepts in GraphRAG and who developed it?")
            if st.button("🔍 Ask with Citations"):
                with st.spinner("Traversing knowledge graph and generating answer..."):
                    try:
                        graph = KnowledgeGraphManager(neo4j_uri, neo4j_user, neo4j_password)
                        result = generate_answer_with_citations(query, graph, llm_model)
                        graph.close()
                        # Display reasoning trace
                        st.subheader("🧠 Reasoning Trace")
                        for step in result.reasoning_trace:
                            st.write(step)
                        # Display answer
                        st.subheader("💬 Answer")
                        st.markdown(result.answer)
                        # Display citations
                        st.subheader("📚 Source Citations")
                        if result.citations:
                            for i, citation in enumerate(result.citations):
                                with st.expander(f"Citation {i+1}: {citation.source_document}"):
                                    st.write(f"**Source Document:** {citation.source_document}")
                                    st.write(f"**Source Text:** {citation.source_text}")
                                    st.write(f"**Confidence:** {citation.confidence:.0%}")
                                    st.write(f"**Reasoning Path:** {' → '.join(citation.reasoning_path)}")
                        else:
                            st.info("No specific citations extracted for this answer.")
                    except Exception as e:
                        st.error(f"Error: {e}")
    with tab3:
        st.header("🔬 Knowledge Graph Visualization")
        st.info("This tab shows the structure of your knowledge graph.")
        if st.button("📊 Show Graph Statistics"):
            try:
                graph = KnowledgeGraphManager(neo4j_uri, neo4j_user, neo4j_password)
                with graph.driver.session() as session:
                    node_count = session.run("MATCH (n) RETURN count(n) as count").single()['count']
                    rel_count = session.run("MATCH ()-[r]->() RETURN count(r) as count").single()['count']
                col1, col2 = st.columns(2)
                col1.metric("Total Entities", node_count)
                col2.metric("Total Relationships", rel_count)
                graph.close()
            except Exception as e:
                st.error(f"Error connecting to Neo4j: {e}")
        if st.button("🗑️ Clear Graph"):
            try:
                # Wipes the Neo4j graph AND resets the in-session document list.
                graph = KnowledgeGraphManager(neo4j_uri, neo4j_user, neo4j_password)
                graph.clear_graph()
                graph.close()
                st.session_state.graph_initialized = False
                st.session_state.documents = []
                st.success("Graph cleared!")
            except Exception as e:
                st.error(f"Error: {e}")
# Script entry point: launch the Streamlit app.
if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "rag_tutorials/knowledge_graph_rag_citations/knowledge_graph_rag.py",
"license": "Apache License 2.0",
"lines": 425,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/multi_agent_trust_layer/multi_agent_trust_layer.py | """
🤝 Multi-Agent Trust Layer Tutorial
This tutorial demonstrates how to build a trust layer for multi-agent systems
that enables secure delegation, trust scoring, and policy enforcement.
Key concepts:
- Agent identity and registration
- Trust scoring based on behavior
- Delegation chains with scope narrowing
- Policy enforcement across agent interactions
- Full audit trail of agent communications
"""
import os
import json
import hashlib
import secrets
import logging
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Set
from dataclasses import dataclass, field
from enum import Enum
from collections import defaultdict
from openai import OpenAI
# Configure logging once at import time; all trust-layer components share
# this module-level logger.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# ============================================================================
# TRUST LEVELS
# ============================================================================
class TrustLevel(Enum):
    """Trust levels, each corresponding to a band of the 0-1000 score range."""
    SUSPENDED = "suspended"    # 0-299
    RESTRICTED = "restricted"  # 300-499
    PROBATION = "probation"    # 500-699
    STANDARD = "standard"      # 700-899
    TRUSTED = "trusted"        # 900-1000

    @classmethod
    def from_score(cls, score: int) -> "TrustLevel":
        """Map a trust score onto its level band (scores below 300 are SUSPENDED)."""
        bands = (
            (900, cls.TRUSTED),
            (700, cls.STANDARD),
            (500, cls.PROBATION),
            (300, cls.RESTRICTED),
        )
        for floor, level in bands:
            if score >= floor:
                return level
        return cls.SUSPENDED
# ============================================================================
# CORE DATA STRUCTURES
# ============================================================================
@dataclass
class AgentIdentity:
    """Represents an agent's verified identity.

    Every agent is tied to a human sponsor so there is always an
    accountable person behind automated actions.
    """
    agent_id: str  # unique identifier; the registry rejects duplicates
    public_key: str  # presumably used to verify the agent's signatures — signing here is simplified
    human_sponsor: str # Email of accountable human
    organization: str
    roles: List[str]  # role labels granted to this agent
    # NOTE(review): datetime.utcnow is naive (no tzinfo) and deprecated in
    # Python 3.12+; all timestamps in this module share this convention,
    # so changing it must be done everywhere at once.
    created_at: datetime = field(default_factory=datetime.utcnow)
    metadata: Dict[str, Any] = field(default_factory=dict)
@dataclass
class TrustScore:
    """Agent's trust score (0-1000) together with its full change history."""
    agent_id: str
    score: int  # 0-1000
    level: TrustLevel
    history: List[Dict[str, Any]] = field(default_factory=list)
    last_updated: datetime = field(default_factory=datetime.utcnow)

    def update(self, delta: int, reason: str):
        """Apply a delta, clamp the score to [0, 1000], refresh the level,
        and append an audit record to history."""
        previous = self.score
        clamped = previous + delta
        if clamped < 0:
            clamped = 0
        elif clamped > 1000:
            clamped = 1000
        self.score = clamped
        self.level = TrustLevel.from_score(clamped)
        self.last_updated = datetime.utcnow()
        self.history.append({
            "timestamp": self.last_updated.isoformat(),
            "old_score": previous,
            "new_score": clamped,
            "delta": delta,
            "reason": reason
        })
@dataclass
class DelegationScope:
    """Defines what an agent can do under a delegation.

    Semantics: an empty `allowed_actions` set means "all actions allowed";
    `denied_actions` always wins over `allowed_actions`.
    """
    allowed_actions: Set[str]
    denied_actions: Set[str] = field(default_factory=set)
    allowed_domains: Set[str] = field(default_factory=set)
    max_tokens: int = 10000
    time_limit_minutes: int = 60
    max_sub_delegations: int = 0  # Can this agent delegate further?
    custom_constraints: Dict[str, Any] = field(default_factory=dict)

    def allows_action(self, action: str) -> bool:
        """Check if an action is allowed under this scope (deny-list wins)."""
        if action in self.denied_actions:
            return False
        if not self.allowed_actions:  # Empty means all allowed
            return True
        return action in self.allowed_actions

    def narrow(self, child_scope: "DelegationScope") -> "DelegationScope":
        """Create a narrowed scope for sub-delegation.

        Bug fix: since an empty `allowed_actions` means "all allowed", a
        plain set intersection was wrong — narrowing an all-allowed parent
        with a restricted child produced an empty (= all-allowed) set,
        silently escalating privileges. Empty sets are now treated as
        wildcards, mirroring the existing handling of `allowed_domains`.
        (Two disjoint non-empty sets still intersect to empty = all-allowed;
        that representational limitation is unchanged here.)
        """
        if not self.allowed_actions:
            narrowed_actions = child_scope.allowed_actions
        elif not child_scope.allowed_actions:
            narrowed_actions = self.allowed_actions
        else:
            narrowed_actions = self.allowed_actions & child_scope.allowed_actions
        return DelegationScope(
            allowed_actions=narrowed_actions,
            # Denials accumulate down the chain.
            denied_actions=self.denied_actions | child_scope.denied_actions,
            allowed_domains=self.allowed_domains & child_scope.allowed_domains if self.allowed_domains else child_scope.allowed_domains,
            # Numeric limits can only shrink.
            max_tokens=min(self.max_tokens, child_scope.max_tokens),
            time_limit_minutes=min(self.time_limit_minutes, child_scope.time_limit_minutes),
            max_sub_delegations=min(self.max_sub_delegations, child_scope.max_sub_delegations),
            # Child constraints override parent's on key collision.
            custom_constraints={**self.custom_constraints, **child_scope.custom_constraints}
        )
@dataclass
class Delegation:
    """A delegation of authority from one agent to another."""
    delegation_id: str
    parent_agent: str
    child_agent: str
    scope: DelegationScope
    task_description: str
    created_at: datetime
    expires_at: datetime
    signature: str  # Signed by parent agent
    parent_delegation_id: Optional[str] = None  # For chain tracking
    tokens_used: int = 0
    is_revoked: bool = False

    def is_valid(self) -> bool:
        """A delegation is valid while it is unrevoked, unexpired, and under
        its token budget; checks short-circuit in that order."""
        return not (
            self.is_revoked
            or datetime.utcnow() > self.expires_at
            or self.tokens_used >= self.scope.max_tokens
        )
@dataclass
class AuditEntry:
    """Audit log entry for agent interactions."""
    timestamp: datetime  # when the event occurred (naive UTC, per module convention)
    event_type: str  # category of the audited event
    agent_id: str  # agent the entry is about
    action: str  # action that was attempted
    delegation_id: Optional[str]  # delegation under which the action ran, if any
    result: str # "allowed", "denied", "error"
    details: Dict[str, Any]  # free-form context for the event
    trust_impact: int = 0  # trust-score delta associated with this event, if any
# ============================================================================
# IDENTITY REGISTRY
# ============================================================================
class IdentityRegistry:
    """In-memory store of agent identities, indexed by agent id and sponsor."""

    def __init__(self):
        self.identities: Dict[str, AgentIdentity] = {}
        # Reverse index: sponsor email -> ids of the agents they vouch for.
        self.sponsor_to_agents: Dict[str, List[str]] = defaultdict(list)

    def register(self, identity: AgentIdentity) -> bool:
        """Register a new agent identity; returns False if the id is taken."""
        agent_id = identity.agent_id
        if agent_id not in self.identities:
            self.identities[agent_id] = identity
            self.sponsor_to_agents[identity.human_sponsor].append(agent_id)
            logger.info(f"Registered agent: {agent_id} (sponsor: {identity.human_sponsor})")
            return True
        logger.warning(f"Agent {agent_id} already registered")
        return False

    def get(self, agent_id: str) -> Optional[AgentIdentity]:
        """Return the identity for agent_id, or None when unregistered."""
        return self.identities.get(agent_id)

    def revoke(self, agent_id: str, reason: str) -> bool:
        """Remove an agent's identity; returns False when the agent is unknown."""
        try:
            identity = self.identities.pop(agent_id)
        except KeyError:
            return False
        self.sponsor_to_agents[identity.human_sponsor].remove(agent_id)
        logger.warning(f"Revoked agent: {agent_id} - {reason}")
        return True
# ============================================================================
# TRUST SCORING ENGINE
# ============================================================================
class TrustScoringEngine:
    """Tracks and adjusts trust scores for agents based on observed events."""

    # Score adjustments for various events
    SCORE_ADJUSTMENTS = {
        "task_completed": +10,
        "stayed_in_scope": +5,
        "accurate_output": +2,
        "scope_violation_attempt": -50,
        "inaccurate_output": -30,
        "resource_exceeded": -20,
        "security_violation": -100,
        "delegation_success": +15,
        "delegation_failure": -25,
    }

    def __init__(self, initial_score: int = 700):
        self.scores: Dict[str, TrustScore] = {}
        self.initial_score = initial_score

    def initialize(self, agent_id: str, initial_score: Optional[int] = None) -> TrustScore:
        """Create, store, and return a fresh TrustScore for agent_id.

        Note: a falsy initial_score (0 or None) falls back to the engine
        default — preserved from the original `or`-based logic.
        """
        starting = initial_score or self.initial_score
        trust = TrustScore(
            agent_id=agent_id,
            score=starting,
            level=TrustLevel.from_score(starting)
        )
        self.scores[agent_id] = trust
        return trust

    def get(self, agent_id: str) -> Optional[TrustScore]:
        """Return the agent's trust score, or None if never initialized."""
        return self.scores.get(agent_id)

    def record_event(self, agent_id: str, event_type: str, custom_delta: Optional[int] = None) -> int:
        """Apply an event's score delta (auto-initializing unknown agents)
        and return the delta that was applied."""
        if agent_id not in self.scores:
            self.initialize(agent_id)
        if custom_delta is None:
            delta = self.SCORE_ADJUSTMENTS.get(event_type, 0)
        else:
            delta = custom_delta
        entry = self.scores[agent_id]
        entry.update(delta, event_type)
        logger.info(f"Trust update: {agent_id} {delta:+d} ({event_type}) → {entry.score}")
        return delta
# ============================================================================
# DELEGATION MANAGER
# ============================================================================
class DelegationManager:
    """Manages delegation chains between agents.

    A delegation grants a child agent a scoped, time-limited permission
    set signed by a parent agent. Sub-delegations can only narrow the
    parent delegation's scope, never widen it.
    """
    def __init__(self, identity_registry: IdentityRegistry, trust_engine: TrustScoringEngine):
        # delegation_id -> Delegation record
        self.delegations: Dict[str, Delegation] = {}
        self.agent_delegations: Dict[str, List[str]] = defaultdict(list)  # agent_id -> [delegation_ids]
        self.identity_registry = identity_registry
        self.trust_engine = trust_engine
    def create_delegation(
        self,
        parent_agent: str,
        child_agent: str,
        scope: DelegationScope,
        task_description: str,
        time_limit_minutes: Optional[int] = None,
        parent_delegation_id: Optional[str] = None
    ) -> Optional[Delegation]:
        """Create a new delegation from parent to child agent.

        Returns the new Delegation, or None when either agent is unknown,
        the parent is suspended, or the referenced parent delegation is
        invalid / forbids further sub-delegation.
        """
        # Verify both agents exist
        if not self.identity_registry.get(parent_agent):
            logger.error(f"Parent agent not found: {parent_agent}")
            return None
        if not self.identity_registry.get(child_agent):
            logger.error(f"Child agent not found: {child_agent}")
            return None
        # Check parent's trust level
        parent_trust = self.trust_engine.get(parent_agent)
        if parent_trust and parent_trust.level == TrustLevel.SUSPENDED:
            logger.error(f"Suspended agent cannot delegate: {parent_agent}")
            return None
        # If this is a sub-delegation, narrow the scope
        if parent_delegation_id:
            parent_del = self.delegations.get(parent_delegation_id)
            if not parent_del or not parent_del.is_valid():
                logger.error(f"Invalid parent delegation: {parent_delegation_id}")
                return None
            if parent_del.scope.max_sub_delegations <= 0:
                logger.error(f"No sub-delegations allowed under: {parent_delegation_id}")
                return None
            # Intersect requested scope with the parent's — child can never
            # end up with more permissions than the parent delegation grants.
            scope = parent_del.scope.narrow(scope)
        # Create delegation
        delegation_id = f"del-{secrets.token_hex(8)}"
        # NOTE(review): a caller-supplied time limit of 0 falls back to the
        # scope default because of the `or` — confirm that is intended.
        time_limit = time_limit_minutes or scope.time_limit_minutes
        delegation = Delegation(
            delegation_id=delegation_id,
            parent_agent=parent_agent,
            child_agent=child_agent,
            scope=scope,
            task_description=task_description,
            created_at=datetime.utcnow(),
            expires_at=datetime.utcnow() + timedelta(minutes=time_limit),
            signature=self._sign_delegation(parent_agent, delegation_id),
            parent_delegation_id=parent_delegation_id
        )
        self.delegations[delegation_id] = delegation
        self.agent_delegations[child_agent].append(delegation_id)
        logger.info(f"Created delegation: {parent_agent} → {child_agent} ({delegation_id})")
        return delegation
    def _sign_delegation(self, agent_id: str, delegation_id: str) -> str:
        """Create a signature for a delegation (simplified - real impl would use crypto)"""
        data = f"{agent_id}:{delegation_id}:{datetime.utcnow().isoformat()}"
        return hashlib.sha256(data.encode()).hexdigest()[:16]
    def validate_action(self, agent_id: str, action: str, delegation_id: Optional[str] = None) -> bool:
        """Validate if an agent can perform an action under their delegation.

        With an explicit delegation_id, that delegation alone is checked
        (it must exist, be valid, and belong to this agent). Otherwise
        any valid delegation of the agent that allows the action suffices.
        """
        if delegation_id:
            delegation = self.delegations.get(delegation_id)
            if not delegation:
                return False
            if not delegation.is_valid():
                return False
            if delegation.child_agent != agent_id:
                return False
            return delegation.scope.allows_action(action)
        # Check if agent has any valid delegation allowing this action
        for del_id in self.agent_delegations.get(agent_id, []):
            delegation = self.delegations.get(del_id)
            if delegation and delegation.is_valid() and delegation.scope.allows_action(action):
                return True
        return False
    def revoke_delegation(self, delegation_id: str, reason: str) -> bool:
        """Revoke a delegation. Returns False for unknown delegation IDs."""
        if delegation_id in self.delegations:
            self.delegations[delegation_id].is_revoked = True
            logger.warning(f"Revoked delegation: {delegation_id} - {reason}")
            return True
        return False
    def get_active_delegations(self, agent_id: str) -> List[Delegation]:
        """Get all active (non-expired, non-revoked) delegations for an agent."""
        return [
            self.delegations[del_id]
            for del_id in self.agent_delegations.get(agent_id, [])
            if self.delegations[del_id].is_valid()
        ]
# ============================================================================
# POLICY ENGINE
# ============================================================================
class MultiAgentPolicyEngine:
    """Enforces policies for multi-agent interactions.

    Combines trust-level gating, per-role allow/deny lists, and
    delegation-scope validation into a single (allowed, reason) verdict.
    """
    def __init__(self, trust_engine: TrustScoringEngine, delegation_manager: DelegationManager):
        self.trust_engine = trust_engine
        self.delegation_manager = delegation_manager
        # role name -> policy dict (base_trust_required / allowed_actions / denied_actions)
        self.role_policies: Dict[str, Dict[str, Any]] = {}
    def add_role_policy(self, role: str, policy: Dict[str, Any]):
        """Register the policy dict governing a role."""
        self.role_policies[role] = policy
    def evaluate(
        self,
        agent_id: str,
        action: str,
        roles: List[str],
        delegation_id: Optional[str] = None
    ) -> tuple[bool, str]:
        """Evaluate whether *action* is allowed; returns (allowed, reason)."""
        trust = self.trust_engine.get(agent_id)
        if trust is None:
            return False, "Agent has no trust score"
        if trust.level == TrustLevel.SUSPENDED:
            return False, "Agent is suspended"
        # Role-based checks: every role the agent holds is examined in turn.
        for role in roles:
            rules = self.role_policies.get(role, {})
            min_trust = rules.get("base_trust_required", 0)
            if trust.score < min_trust:
                return False, f"Trust score {trust.score} below minimum {min_trust} for role {role}"
            if action in rules.get("denied_actions", []):
                return False, f"Action '{action}' denied for role {role}"
            # A non-empty allowlist means only listed actions are permitted.
            allowed = rules.get("allowed_actions", [])
            if allowed and action not in allowed:
                return False, f"Action '{action}' not in allowed list for role {role}"
        # Delegation scope check (only when a delegation is supplied).
        if delegation_id and not self.delegation_manager.validate_action(agent_id, action, delegation_id):
            return False, f"Action '{action}' not allowed under delegation {delegation_id}"
        # Restricted agents always need a human in the loop.
        if trust.level == TrustLevel.RESTRICTED:
            return False, "Agent is restricted - requires human approval"
        return True, "Action allowed"
# ============================================================================
# TRUST LAYER (MAIN INTERFACE)
# ============================================================================
class TrustLayer:
    """Main interface for the multi-agent trust layer.

    Wires together identity registration, trust scoring, delegation
    management, policy evaluation, and an append-only audit log.
    """
    def __init__(self):
        self.identity_registry = IdentityRegistry()
        self.trust_engine = TrustScoringEngine()
        self.delegation_manager = DelegationManager(self.identity_registry, self.trust_engine)
        self.policy_engine = MultiAgentPolicyEngine(self.trust_engine, self.delegation_manager)
        # Append-only record of every authorization decision.
        self.audit_log: List[AuditEntry] = []
    def register_agent(
        self,
        agent_id: str,
        human_sponsor: str,
        organization: str,
        roles: List[str],
        initial_trust: int = 700
    ) -> bool:
        """Register a new agent with the trust layer.

        Returns True on success; False when the identity registry
        rejects the registration (e.g. duplicate agent_id).
        """
        # Generate a key pair (simplified)
        # NOTE(review): this is a hash placeholder, not real key material —
        # a production implementation would generate an asymmetric key pair.
        public_key = hashlib.sha256(f"{agent_id}:{secrets.token_hex(16)}".encode()).hexdigest()
        identity = AgentIdentity(
            agent_id=agent_id,
            public_key=public_key,
            human_sponsor=human_sponsor,
            organization=organization,
            roles=roles
        )
        if self.identity_registry.register(identity):
            self.trust_engine.initialize(agent_id, initial_trust)
            return True
        return False
    def create_delegation(
        self,
        from_agent: str,
        to_agent: str,
        scope: Dict[str, Any],
        task_description: str,
        time_limit_minutes: int = 60
    ) -> Optional[str]:
        """Create a delegation from one agent to another.

        *scope* is a plain dict (allowed_actions, denied_actions,
        allowed_domains, max_tokens, max_sub_delegations) converted into
        a DelegationScope. Returns the delegation ID, or None on failure.
        """
        delegation_scope = DelegationScope(
            allowed_actions=set(scope.get("allowed_actions", [])),
            denied_actions=set(scope.get("denied_actions", [])),
            allowed_domains=set(scope.get("allowed_domains", [])),
            max_tokens=scope.get("max_tokens", 10000),
            time_limit_minutes=time_limit_minutes,
            max_sub_delegations=scope.get("max_sub_delegations", 0)
        )
        delegation = self.delegation_manager.create_delegation(
            parent_agent=from_agent,
            child_agent=to_agent,
            scope=delegation_scope,
            task_description=task_description,
            time_limit_minutes=time_limit_minutes
        )
        return delegation.delegation_id if delegation else None
    def authorize_action(
        self,
        agent_id: str,
        action: str,
        delegation_id: Optional[str] = None
    ) -> tuple[bool, str]:
        """Authorize an action for an agent.

        Every decision (including unknown agents) is written to the audit
        log, and the agent's trust score is adjusted by the outcome.
        """
        identity = self.identity_registry.get(agent_id)
        if not identity:
            self._log_audit(agent_id, action, "denied", delegation_id, {"reason": "Unknown agent"})
            return False, "Unknown agent"
        allowed, reason = self.policy_engine.evaluate(
            agent_id=agent_id,
            action=action,
            roles=identity.roles,
            delegation_id=delegation_id
        )
        # Update trust score based on result
        if allowed:
            self.trust_engine.record_event(agent_id, "stayed_in_scope")
        else:
            self.trust_engine.record_event(agent_id, "scope_violation_attempt")
        self._log_audit(
            agent_id, action,
            "allowed" if allowed else "denied",
            delegation_id,
            {"reason": reason}
        )
        return allowed, reason
    def record_task_result(self, agent_id: str, delegation_id: str, success: bool):
        """Record the result of a delegated task.

        NOTE(review): bookkeeping is asymmetric — success credits both the
        child ("task_completed") and the delegation's parent
        ("delegation_success"), while failure penalizes only the child with
        "delegation_failure" (nominally a parent-side event) and never
        touches the parent. Also, an unknown delegation_id raises KeyError
        on the success path. Confirm this is intended.
        """
        if success:
            self.trust_engine.record_event(agent_id, "task_completed")
            self.trust_engine.record_event(
                self.delegation_manager.delegations[delegation_id].parent_agent,
                "delegation_success"
            )
        else:
            self.trust_engine.record_event(agent_id, "delegation_failure")
    def get_trust_score(self, agent_id: str) -> Optional[int]:
        """Get an agent's trust score, or None for unknown agents."""
        trust = self.trust_engine.get(agent_id)
        return trust.score if trust else None
    def get_trust_level(self, agent_id: str) -> Optional[TrustLevel]:
        """Get an agent's trust level, or None for unknown agents."""
        trust = self.trust_engine.get(agent_id)
        return trust.level if trust else None
    def _log_audit(
        self,
        agent_id: str,
        action: str,
        result: str,
        delegation_id: Optional[str],
        details: Dict[str, Any]
    ):
        """Append an authorization decision to the audit log."""
        entry = AuditEntry(
            timestamp=datetime.utcnow(),
            event_type="action_authorization",
            agent_id=agent_id,
            action=action,
            delegation_id=delegation_id,
            result=result,
            details=details
        )
        self.audit_log.append(entry)
    def get_audit_log(self, agent_id: Optional[str] = None) -> List[Dict]:
        """Get audit log as serializable dicts, optionally filtered by agent."""
        entries = self.audit_log
        if agent_id:
            entries = [e for e in entries if e.agent_id == agent_id]
        return [
            {
                "timestamp": e.timestamp.isoformat(),
                "event_type": e.event_type,
                "agent_id": e.agent_id,
                "action": e.action,
                "delegation_id": e.delegation_id,
                "result": e.result,
                "details": e.details
            }
            for e in entries
        ]
# ============================================================================
# EXAMPLE: GOVERNED MULTI-AGENT SYSTEM
# ============================================================================
class GovernedAgent:
    """An agent that performs every action through the trust layer."""
    def __init__(self, agent_id: str, trust_layer: TrustLayer):
        self.agent_id = agent_id
        self.trust_layer = trust_layer
        # Delegation the agent currently acts under (None = role policy only).
        self.current_delegation: Optional[str] = None
    def execute(self, action: str, params: Dict[str, Any]) -> Dict[str, Any]:
        """Run *action* through authorization; returns a result dict with
        success flag, result/error, and the agent's current trust score."""
        allowed, reason = self.trust_layer.authorize_action(
            self.agent_id,
            action,
            self.current_delegation
        )
        score = self.trust_layer.get_trust_score(self.agent_id)
        if allowed:
            # Authorized — run the (simulated) action.
            return {
                "success": True,
                "result": self._execute_action(action, params),
                "trust_score": score
            }
        return {
            "success": False,
            "error": f"Action denied: {reason}",
            "trust_score": score
        }
    def _execute_action(self, action: str, params: Dict[str, Any]) -> str:
        """Simulate executing an action (demo stub)."""
        return f"Executed {action} with params {params}"
# ============================================================================
# MAIN DEMO
# ============================================================================
def main():
    """Interactive demo: registers agents, creates a scoped delegation,
    exercises allowed/denied actions, and prints trust scores + audit log."""
    print("🤝 Multi-Agent Trust Layer Demo")
    print("=" * 40)
    # Initialize trust layer
    trust_layer = TrustLayer()
    # Add role policies
    trust_layer.policy_engine.add_role_policy("researcher", {
        "base_trust_required": 500,
        "allowed_actions": ["web_search", "read_document", "summarize", "analyze"],
        "denied_actions": ["execute_code", "send_email", "delete_file"]
    })
    trust_layer.policy_engine.add_role_policy("writer", {
        "base_trust_required": 600,
        "allowed_actions": ["write_document", "edit_document", "summarize"],
        "denied_actions": ["execute_code", "web_search"]
    })
    trust_layer.policy_engine.add_role_policy("orchestrator", {
        "base_trust_required": 800,
        "allowed_actions": [], # Can do anything not explicitly denied
        "denied_actions": ["delete_system_files"]
    })
    # Register agents
    print("\n📋 Registering agents...")
    trust_layer.register_agent(
        agent_id="orchestrator-001",
        human_sponsor="alice@company.com",
        organization="Acme Corp",
        roles=["orchestrator"],
        initial_trust=900
    )
    print("✅ Registered: orchestrator-001 (Sponsor: alice@company.com)")
    trust_layer.register_agent(
        agent_id="researcher-002",
        human_sponsor="bob@company.com",
        organization="Acme Corp",
        roles=["researcher"],
        initial_trust=750
    )
    print("✅ Registered: researcher-002 (Sponsor: bob@company.com)")
    trust_layer.register_agent(
        agent_id="writer-003",
        human_sponsor="carol@company.com",
        organization="Acme Corp",
        roles=["writer"],
        initial_trust=700
    )
    print("✅ Registered: writer-003 (Sponsor: carol@company.com)")
    # Create delegation chain
    print("\n🔐 Creating delegation chain...")
    delegation_id = trust_layer.create_delegation(
        from_agent="orchestrator-001",
        to_agent="researcher-002",
        scope={
            "allowed_actions": ["web_search", "summarize"],
            "allowed_domains": ["arxiv.org", "github.com"],
            "max_tokens": 50000
        },
        task_description="Research recent papers on AI safety",
        time_limit_minutes=30
    )
    print(f"✅ Delegation: orchestrator-001 → researcher-002")
    print(f"   ID: {delegation_id}")
    print(f"   Scope: web_search, summarize")
    print(f"   Time Limit: 30 minutes")
    # Create governed agents
    researcher = GovernedAgent("researcher-002", trust_layer)
    researcher.current_delegation = delegation_id
    writer = GovernedAgent("writer-003", trust_layer)
    # Test actions
    print("\n" + "=" * 40)
    print("🧪 Testing Agent Actions")
    print("=" * 40)
    # Test 1: Allowed action within delegation
    print("\n🤖 researcher-002: web_search (within scope)")
    result = researcher.execute("web_search", {"query": "AI safety papers 2024"})
    print(f"   Result: {'✅ ALLOWED' if result['success'] else '❌ DENIED'}")
    print(f"   Trust Score: {result['trust_score']}")
    # Test 2: Denied action outside delegation
    print("\n🤖 researcher-002: send_email (outside scope)")
    result = researcher.execute("send_email", {"to": "test@example.com"})
    print(f"   Result: {'✅ ALLOWED' if result['success'] else '❌ DENIED'}")
    if not result['success']:
        print(f"   Reason: {result['error']}")
    print(f"   Trust Score: {result['trust_score']}")
    # Test 3: Writer tries action not in role
    print("\n🤖 writer-003: web_search (not in role)")
    result = writer.execute("web_search", {"query": "test"})
    print(f"   Result: {'✅ ALLOWED' if result['success'] else '❌ DENIED'}")
    if not result['success']:
        print(f"   Reason: {result['error']}")
    print(f"   Trust Score: {result['trust_score']}")
    # Test 4: Writer does allowed action
    print("\n🤖 writer-003: write_document (in role)")
    result = writer.execute("write_document", {"content": "Report draft"})
    print(f"   Result: {'✅ ALLOWED' if result['success'] else '❌ DENIED'}")
    print(f"   Trust Score: {result['trust_score']}")
    # Show final trust scores
    print("\n" + "=" * 40)
    print("📊 Final Trust Scores")
    print("=" * 40)
    for agent_id in ["orchestrator-001", "researcher-002", "writer-003"]:
        score = trust_layer.get_trust_score(agent_id)
        level = trust_layer.get_trust_level(agent_id)
        print(f"   {agent_id}: {score} ({level.value})")
    # Show audit log
    print("\n" + "=" * 40)
    print("📋 Audit Log")
    print("=" * 40)
    for entry in trust_layer.get_audit_log():
        status = "✅" if entry["result"] == "allowed" else "❌"
        print(f"   {status} {entry['agent_id']}: {entry['action']} - {entry['result']}")
    print("\n✅ Demo complete!")
# Script entry point: run the interactive demo.
if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/multi_agent_trust_layer/multi_agent_trust_layer.py",
"license": "Apache License 2.0",
"lines": 645,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/single_agent_apps/ai_agent_governance/ai_agent_governance.py | """
🛡️ AI Agent Governance - Policy-Based Sandboxing Tutorial
This tutorial demonstrates how to build a governance layer that enforces
deterministic policies on AI agents, preventing dangerous actions before execution.
Key concepts:
- Policy-based action validation
- Tool interception and wrapping
- Audit logging for compliance
- Rate limiting and resource controls
"""
import os
import json
import yaml
import logging
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Callable
from dataclasses import dataclass, field
from enum import Enum
from functools import wraps
from collections import defaultdict
import re
from openai import OpenAI
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# ============================================================================
# CORE DATA STRUCTURES
# ============================================================================
class Decision(Enum):
    """Policy evaluation decision"""
    ALLOW = "allow"                        # action may proceed
    DENY = "deny"                          # action is blocked outright
    REQUIRE_APPROVAL = "require_approval"  # action needs a human in the loop
@dataclass
class Action:
    """Represents an action an agent wants to perform"""
    name: str                                                     # tool/function name
    args: tuple = field(default_factory=tuple)                    # positional arguments
    kwargs: Dict[str, Any] = field(default_factory=dict)          # keyword arguments
    timestamp: datetime = field(default_factory=datetime.utcnow)  # creation time (UTC)
    agent_id: str = "default-agent"                               # originating agent
@dataclass
class PolicyResult:
    """Result of a policy evaluation"""
    decision: Decision     # allow / deny / require_approval
    reason: str            # human-readable explanation of the decision
    policy_name: str       # name of the rule that produced the result
    is_terminal: bool = True  # if True, stops further rule evaluation
@dataclass
class AuditEntry:
    """An entry in the audit log"""
    timestamp: datetime       # when the decision was made (UTC)
    action: Action            # the attempted action
    decision: Decision        # the outcome
    reason: str               # why the decision was made
    policy_matched: str       # which policy rule decided
# ============================================================================
# POLICY ENGINE
# ============================================================================
class PolicyRule:
    """Base class for policy rules"""
    def __init__(self, name: str):
        # Identifier reported in PolicyResult.policy_name and audit entries.
        self.name = name
    def evaluate(self, action: Action) -> Optional[PolicyResult]:
        """Evaluate the action against this rule. Returns None if rule doesn't apply."""
        raise NotImplementedError
class FilesystemPolicy(PolicyRule):
    """Policy for filesystem access control.

    Denied paths take precedence over allowed paths; anything not
    explicitly allowed is denied (default-deny).
    """
    def __init__(self, allowed_paths: List[str], denied_paths: List[str]):
        super().__init__("filesystem")
        self.allowed_paths = [os.path.expanduser(p) for p in allowed_paths]
        self.denied_paths = [os.path.expanduser(p) for p in denied_paths]
    @staticmethod
    def _is_within(path: str, base: str) -> bool:
        """True if *path* equals *base* or lies inside it.

        Separator-aware containment: a plain str.startswith prefix test
        would let '/etcetera' match a base of '/etc' (and '/tmpfoo' match
        an allowed '/tmp'); anchoring on os.sep closes that hole.
        """
        base = os.path.abspath(base)
        return path == base or path.startswith(base.rstrip(os.sep) + os.sep)
    def evaluate(self, action: Action) -> Optional[PolicyResult]:
        """Return a decision for file-touching actions, else None."""
        # Extract a path from kwargs, or a path-looking first positional arg.
        path = None
        if action.kwargs.get("path"):
            path = action.kwargs["path"]
        elif action.kwargs.get("file_path"):
            path = action.kwargs["file_path"]
        elif action.args and isinstance(action.args[0], str) and "/" in action.args[0]:
            path = action.args[0]
        if not path:
            return None  # Rule doesn't apply
        path = os.path.abspath(os.path.expanduser(path))
        # Check denied paths first — deny always wins.
        for denied in self.denied_paths:
            if self._is_within(path, denied):
                return PolicyResult(
                    decision=Decision.DENY,
                    reason=f"Path '{path}' matches denied pattern '{denied}'",
                    policy_name=self.name
                )
        # Check if path is in allowed paths
        for allowed in self.allowed_paths:
            if self._is_within(path, allowed):
                return PolicyResult(
                    decision=Decision.ALLOW,
                    reason=f"Path '{path}' is within allowed directory '{allowed}'",
                    policy_name=self.name
                )
        # Default deny if not explicitly allowed
        return PolicyResult(
            decision=Decision.DENY,
            reason=f"Path '{path}' is outside allowed directories",
            policy_name=self.name
        )
class NetworkPolicy(PolicyRule):
    """Policy for network access control via a domain allowlist."""
    def __init__(self, allowed_domains: List[str], block_all_others: bool = True):
        super().__init__("network")
        self.allowed_domains = allowed_domains
        self.block_all_others = block_all_others
    def evaluate(self, action: Action) -> Optional[PolicyResult]:
        """Return a decision for URL/domain-bearing actions, else None."""
        # Look for a target in well-known kwargs first.
        target = None
        for key in ("url", "endpoint", "domain", "host"):
            if key in action.kwargs:
                target = action.kwargs[key]
                break
        if not target:
            # Fall back to any positional arg that looks like a URL.
            target = next(
                (arg for arg in action.args
                 if isinstance(arg, str) and ("http://" in arg or "https://" in arg)),
                None,
            )
        if not target:
            return None  # Rule doesn't apply
        # Strip scheme/path to get the bare domain.
        matched = re.search(r"https?://([^/]+)", target)
        domain = matched.group(1) if matched else target
        # Exact match or any subdomain of an allowlisted entry passes.
        if any(domain == entry or domain.endswith("." + entry) for entry in self.allowed_domains):
            return PolicyResult(
                decision=Decision.ALLOW,
                reason=f"Domain '{domain}' is in allowlist",
                policy_name=self.name
            )
        if self.block_all_others:
            return PolicyResult(
                decision=Decision.DENY,
                reason=f"Domain '{domain}' not in allowlist",
                policy_name=self.name
            )
        return None
class RateLimitPolicy(PolicyRule):
    """Policy that rate-limits agent actions over a sliding one-minute window."""
    def __init__(self, max_actions_per_minute: int = 60):
        super().__init__("rate_limit")
        self.max_actions_per_minute = max_actions_per_minute
        # Timestamps of recent evaluations within the window.
        self.action_history: List[datetime] = []
    def evaluate(self, action: Action) -> Optional[PolicyResult]:
        """Deny when the window budget is exhausted; otherwise count and pass."""
        moment = datetime.utcnow()
        window_start = moment - timedelta(minutes=1)
        # Drop timestamps that have fallen out of the window.
        self.action_history = [stamp for stamp in self.action_history if stamp > window_start]
        if len(self.action_history) >= self.max_actions_per_minute:
            return PolicyResult(
                decision=Decision.DENY,
                reason=f"Rate limit exceeded: {len(self.action_history)}/{self.max_actions_per_minute} actions per minute",
                policy_name=self.name
            )
        self.action_history.append(moment)
        return None  # Allow - doesn't block
class ApprovalRequiredPolicy(PolicyRule):
    """Policy that escalates listed actions to a human for approval."""
    def __init__(self, actions_requiring_approval: List[str]):
        super().__init__("approval_required")
        self.actions_requiring_approval = actions_requiring_approval
    def evaluate(self, action: Action) -> Optional[PolicyResult]:
        """Escalate listed actions; pass on everything else."""
        if action.name not in self.actions_requiring_approval:
            return None
        return PolicyResult(
            decision=Decision.REQUIRE_APPROVAL,
            reason=f"Action '{action.name}' requires human approval",
            policy_name=self.name
        )
class PolicyEngine:
    """Central policy evaluation engine.

    Rules are evaluated in registration order; the first rule that
    returns a terminal result decides the action. Every decision is
    appended to the audit log.
    """
    def __init__(self):
        self.rules: List[PolicyRule] = []
        self.audit_log: List[AuditEntry] = []
    def add_rule(self, rule: PolicyRule):
        """Add a policy rule to the engine"""
        self.rules.append(rule)
    def evaluate(self, action: Action) -> PolicyResult:
        """Evaluate an action against all policy rules"""
        for rule in self.rules:
            result = rule.evaluate(action)
            if result and result.is_terminal:
                self._log_audit(action, result)
                return result
        # Default allow if no rule blocks
        # NOTE(review): this is fail-open — actions no rule claims are
        # permitted. Confirm that is the intended default posture.
        result = PolicyResult(
            decision=Decision.ALLOW,
            reason="No policy rule blocked this action",
            policy_name="default"
        )
        self._log_audit(action, result)
        return result
    def _log_audit(self, action: Action, result: PolicyResult):
        """Log action and decision to audit trail"""
        entry = AuditEntry(
            timestamp=datetime.utcnow(),
            action=action,
            decision=result.decision,
            reason=result.reason,
            policy_matched=result.policy_name
        )
        self.audit_log.append(entry)
        logger.info(f"AUDIT: {result.decision.value.upper()} - {action.name} - {result.reason}")
    def get_audit_log(self) -> List[Dict]:
        """Get audit log as serializable dictionaries"""
        return [
            {
                "timestamp": entry.timestamp.isoformat(),
                "action": entry.action.name,
                "action_args": str(entry.action.kwargs),
                "decision": entry.decision.value,
                "reason": entry.reason,
                "policy_matched": entry.policy_matched
            }
            for entry in self.audit_log
        ]
    @classmethod
    def from_yaml(cls, yaml_content: str) -> "PolicyEngine":
        """Create a PolicyEngine from YAML configuration.

        Recognized keys under ``policies``: ``filesystem`` (allowed_paths /
        denied_paths), ``network`` (allowed_domains / block_all_others),
        and ``execution`` (max_actions_per_minute / require_approval_for).
        """
        config = yaml.safe_load(yaml_content)
        engine = cls()
        policies = config.get("policies", {})
        if "filesystem" in policies:
            fs = policies["filesystem"]
            engine.add_rule(FilesystemPolicy(
                allowed_paths=fs.get("allowed_paths", []),
                denied_paths=fs.get("denied_paths", [])
            ))
        if "network" in policies:
            net = policies["network"]
            engine.add_rule(NetworkPolicy(
                allowed_domains=net.get("allowed_domains", []),
                block_all_others=net.get("block_all_others", True)
            ))
        if "execution" in policies:
            exe = policies["execution"]
            if "max_actions_per_minute" in exe:
                engine.add_rule(RateLimitPolicy(exe["max_actions_per_minute"]))
            if "require_approval_for" in exe:
                engine.add_rule(ApprovalRequiredPolicy(exe["require_approval_for"]))
        return engine
# ============================================================================
# GOVERNANCE WRAPPER
# ============================================================================
class PolicyViolation(Exception):
    """Raised when an agent action is blocked by a governance policy."""
def get_human_approval(action: Action) -> bool:
    """Get human approval for an action (interactive prompt).

    Blocks on stdin; returns True only for an explicit 'y' answer —
    anything else (including empty input) is treated as rejection.
    """
    print(f"\n⏸️ APPROVAL REQUIRED")
    print(f" Action: {action.name}")
    print(f" Args: {action.kwargs}")
    response = input(" Approve? [y/N]: ").strip().lower()
    return response == "y"
def governed_tool(policy_engine: PolicyEngine, require_interactive_approval: bool = True):
    """
    Decorator factory that wraps a tool function with governance checks.

    Args:
        policy_engine: The PolicyEngine used to evaluate each call
        require_interactive_approval: If True, prompt a human when a policy
            escalates; if False, escalations are rejected as pending
    """
    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Describe the attempted call so policies can inspect it.
            attempted = Action(name=func.__name__, args=args, kwargs=kwargs)
            verdict = policy_engine.evaluate(attempted)
            if verdict.decision == Decision.DENY:
                raise PolicyViolation(f"❌ DENIED: {verdict.reason}")
            if verdict.decision == Decision.REQUIRE_APPROVAL:
                if not require_interactive_approval:
                    raise PolicyViolation(f"⏸️ PENDING: {verdict.reason}")
                if not get_human_approval(attempted):
                    raise PolicyViolation("❌ DENIED: Human rejected the action")
            # Allowed (or approved) — execute the real tool.
            print(f"✅ ALLOWED: {verdict.reason}")
            return func(*args, **kwargs)
        return wrapper
    return decorator
# ============================================================================
# EXAMPLE TOOLS (to be governed)
# ============================================================================
def create_governed_tools(policy_engine: PolicyEngine) -> Dict[str, Callable]:
    """Build the demo tool set, each function wrapped with governance checks."""
    guard = governed_tool(policy_engine)

    @guard
    def read_file(path: str) -> str:
        """Return the full text of a file."""
        with open(path, "r") as handle:
            return handle.read()

    @guard
    def write_file(path: str, content: str) -> str:
        """Write text to a file, replacing any existing content."""
        with open(path, "w") as handle:
            handle.write(content)
        return f"Successfully wrote {len(content)} characters to {path}"

    @guard
    def delete_file(path: str) -> str:
        """Remove a file from disk."""
        os.remove(path)
        return f"Successfully deleted {path}"

    @guard
    def web_request(url: str) -> str:
        """Make a web request (simulated)."""
        return f"Simulated response from {url}"

    @guard
    def execute_shell(command: str) -> str:
        """Execute a shell command (simulated for safety)."""
        return f"Simulated execution of: {command}"

    # @wraps preserves __name__, so the mapping keys match the originals.
    return {
        fn.__name__: fn
        for fn in (read_file, write_file, delete_file, web_request, execute_shell)
    }
# ============================================================================
# DEMO: SIMPLE AGENT WITH GOVERNANCE
# ============================================================================
class GovernedAgent:
    """A simple agent with governance layer.

    Asks an LLM which tool to call, then routes the call through the
    governed tool wrappers so every execution is policy-checked.
    """
    def __init__(self, policy_engine: PolicyEngine):
        self.policy_engine = policy_engine
        # Tool functions are pre-wrapped so every call passes policy checks.
        self.tools = create_governed_tools(policy_engine)
        self.client = OpenAI()
    def run(self, user_request: str) -> str:
        """Process a user request with governance.

        Returns either the tool execution result or the LLM's plain reply.
        """
        # Create system prompt with available tools
        system_prompt = """You are a helpful assistant with access to the following tools:
- read_file(path): Read a file
- write_file(path, content): Write to a file
- delete_file(path): Delete a file
- web_request(url): Make a web request
- execute_shell(command): Execute a shell command
Analyze the user's request and respond with the tool call you would make.
Format: TOOL: tool_name(arg1="value1", arg2="value2")
If no tool is needed, just respond normally."""
        # Get LLM response
        # NOTE(review): model name is hard-coded — consider making it configurable.
        response = self.client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_request}
            ],
            max_tokens=500
        )
        llm_response = response.choices[0].message.content
        print(f"\n🤖 Agent response: {llm_response}")
        # Parse and execute tool call if present
        if "TOOL:" in llm_response:
            tool_line = llm_response.split("TOOL:")[1].strip().split("\n")[0]
            return self._execute_tool_call(tool_line)
        return llm_response
    def _execute_tool_call(self, tool_call: str) -> str:
        """Parse and execute a tool call string of the form name(k="v", ...)."""
        # Simple parser for tool_name(kwargs)
        match = re.match(r"(\w+)\((.*)\)", tool_call)
        if not match:
            return f"Could not parse tool call: {tool_call}"
        tool_name = match.group(1)
        args_str = match.group(2)
        # Parse kwargs
        # NOTE(review): naive split(", ") breaks when an argument value itself
        # contains ", " — acceptable for the demo, verify before reuse.
        kwargs = {}
        for arg in args_str.split(", "):
            if "=" in arg:
                key, value = arg.split("=", 1)
                kwargs[key.strip()] = value.strip().strip('"\'')
        if tool_name not in self.tools:
            return f"Unknown tool: {tool_name}"
        try:
            result = self.tools[tool_name](**kwargs)
            return f"Tool result: {result}"
        except PolicyViolation as e:
            # Governance denial — surface the reason to the caller.
            return str(e)
        except Exception as e:
            return f"Tool error: {e}"
# ============================================================================
# MAIN DEMO
# ============================================================================
def main():
    """Demo: load a YAML policy, exercise governed tools against allowed and
    denied paths/domains, print the audit log, and (optionally) run the LLM
    agent when an OpenAI API key is configured."""
    print("🛡️ AI Agent Governance Demo")
    print("=" * 40)
    # Define policy configuration
    policy_yaml = """
policies:
  filesystem:
    allowed_paths:
      - /workspace
      - /tmp
    denied_paths:
      - /etc
      - /home
      - ~/.ssh
  network:
    allowed_domains:
      - api.openai.com
      - api.github.com
    block_all_others: true
  execution:
    max_actions_per_minute: 60
    require_approval_for:
      - delete_file
      - execute_shell
"""
    print("\n📋 Loading policy configuration...")
    policy_engine = PolicyEngine.from_yaml(policy_yaml)
    print("✅ Policy engine initialized with rules:")
    for rule in policy_engine.rules:
        print(f"   - {rule.name}")
    # Create governed tools
    tools = create_governed_tools(policy_engine)
    # Demo: Test various actions
    print("\n" + "=" * 40)
    print("📝 Testing Governance Layer")
    print("=" * 40)
    # Each case: (tool name, kwargs) — mixes allowed and denied targets.
    test_cases = [
        ("read_file", {"path": "/etc/passwd"}),
        ("write_file", {"path": "/workspace/report.md", "content": "# Analysis Report\n"}),
        ("web_request", {"url": "https://api.github.com/users"}),
        ("web_request", {"url": "https://unknown-site.com/api"}),
        ("read_file", {"path": "/workspace/data.txt"}),
    ]
    for tool_name, kwargs in test_cases:
        print(f"\n🤖 Testing: {tool_name}({kwargs})")
        try:
            # We need to create a temp file for the read test
            if tool_name == "read_file" and kwargs["path"] == "/workspace/data.txt":
                os.makedirs("/workspace", exist_ok=True)
                with open("/workspace/data.txt", "w") as f:
                    f.write("Test data")
            result = tools[tool_name](**kwargs)
            print(f"   Result: {result}")
        except PolicyViolation as e:
            # Expected for denied cases — print the governance verdict.
            print(f"   {e}")
        except Exception as e:
            print(f"   Error: {e}")
    # Print audit log
    print("\n" + "=" * 40)
    print("📊 Audit Log")
    print("=" * 40)
    for entry in policy_engine.get_audit_log():
        print(f"   {entry['decision'].upper():8} | {entry['action']:15} | {entry['reason'][:50]}")
    # Demo with LLM agent (optional - requires API key)
    if os.getenv("OPENAI_API_KEY"):
        print("\n" + "=" * 40)
        print("🤖 LLM Agent Demo (with governance)")
        print("=" * 40)
        agent = GovernedAgent(policy_engine)
        demo_requests = [
            "Read the contents of /etc/passwd",
            "Write a summary to /workspace/summary.md",
            "Make a request to api.github.com to get user info"
        ]
        for request in demo_requests:
            print(f"\n👤 User: {request}")
            result = agent.run(request)
            print(f"📤 Result: {result}")
    else:
        print("\n💡 Set OPENAI_API_KEY to run the full LLM agent demo")
    print("\n✅ Demo complete!")
# Script entry point: run the governance demo.
if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/single_agent_apps/ai_agent_governance/ai_agent_governance.py",
"license": "Apache License 2.0",
"lines": 478,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/llm_optimization_tools/headroom_context_optimization/headroom_demo.py | """
Headroom Context Optimization Demo
==================================
This demo shows how Headroom reduces token usage by 50-90% while preserving
accuracy. It recreates the "needle in haystack" test from the Headroom repo.
Run: python headroom_demo.py
"""
import json
from datetime import datetime, timedelta
# Generate log entries with one critical error buried among INFO noise
# (defaults: 100 entries, needle at position 67 — the original demo setup).
def generate_test_logs(total=100, needle_index=67):
    """Generate *total* log entries with a FATAL error buried at *needle_index*.

    Args:
        total: Number of log entries to produce (default 100).
        needle_index: Zero-based position of the single FATAL entry
            (default 67). If >= total, all entries are INFO.

    Returns:
        list[dict]: INFO entries (the "haystack") plus one FATAL entry
        (the "needle") at position ``needle_index``.
    """
    services = ["api-gateway", "user-service", "inventory", "auth", "payment-gateway"]
    logs = []
    base_time = datetime(2024, 12, 15, 0, 0, 0)
    for i in range(total):
        if i == needle_index:
            # The critical error - the "needle"
            logs.append({
                "timestamp": (base_time + timedelta(hours=3, minutes=47, seconds=23)).isoformat() + "Z",
                "level": "FATAL",
                "service": "payment-gateway",
                "message": "Connection pool exhausted",
                "error_code": "PG-5523",
                "resolution": "Increase max_connections to 500 in config/database.yml",
                "affected_transactions": 1847
            })
        else:
            # Normal INFO logs - the "haystack"; timestamps advance one minute
            # per entry, rolling into the hour field every 60 entries.
            logs.append({
                "timestamp": (base_time + timedelta(hours=i//60, minutes=i%60)).isoformat() + "Z",
                "level": "INFO",
                "service": services[i % len(services)],
                "message": f"Request processed successfully - latency={50 + i}ms",
                "request_id": f"req-{i:06d}",
                "status_code": 200
            })
    return logs
def demo_without_headroom():
    """Show the baseline: sending all 100 logs to the LLM."""
    entries = generate_test_logs()
    serialized = json.dumps(entries, indent=2)
    banner = "=" * 60
    print(banner)
    print("BASELINE (Without Headroom)")
    print(banner)
    print(f"Total log entries: {len(entries)}")
    print(f"Total characters: {len(serialized):,}")
    print(f"Estimated tokens: ~{len(serialized) // 4:,}")
    print()
    # Preview the shape of the payload: a handful of entries plus markers
    # showing where the buried FATAL record sits.
    print("First 3 entries:")
    for entry in entries[:3]:
        print(f"  [{entry['level']}] {entry['service']}: {entry['message'][:50]}...")
    print("  ... 94 more INFO entries ...")
    print("  [FATAL] payment-gateway: Connection pool exhausted (position 67)")
    print("  ... 32 more INFO entries ...")
    print()
    return entries, serialized
def demo_with_headroom():
    """Show how Headroom compresses to keep only what matters."""
    entries = generate_test_logs()
    # Headroom's SmartCrusher keeps:
    #   - First N items (context)
    #   - Last N items (recency)
    #   - Anomalies (errors, exceptions, non-INFO)
    #   - Query-relevant items
    kept = list(entries[:3])      # first items for context
    kept.append(entries[67])      # the FATAL anomaly
    kept.extend(entries[-2:])     # last items for recency
    serialized = json.dumps(kept, indent=2)
    banner = "=" * 60
    print(banner)
    print("WITH HEADROOM (SmartCrusher)")
    print(banner)
    print(f"Compressed to: {len(kept)} entries (from 100)")
    print(f"Total characters: {len(serialized):,}")
    print(f"Estimated tokens: ~{len(serialized) // 4:,}")
    print()
    print("What Headroom kept:")
    for idx, entry in enumerate(kept):
        if idx < 3:
            label = "(first items)"
        elif entry['level'] == 'FATAL':
            label = "(anomaly - CRITICAL)"
        else:
            label = "(last items)"
        print(f"  [{entry['level']}] {entry['service']}: {entry.get('message', entry.get('error_code', '')[:40])}... {label}")
    print()
    return kept, serialized
def calculate_savings(baseline_output, compressed_output):
    """Print a token-savings comparison between two serialized payloads.

    Args:
        baseline_output: The uncompressed serialized payload (str).
        compressed_output: The Headroom-compressed payload (str).

    Token counts use the common ~4 characters-per-token heuristic.
    Prints the comparison; returns None.
    """
    baseline_tokens = len(baseline_output) // 4
    compressed_tokens = len(compressed_output) // 4
    # Fix: guard against ZeroDivisionError for degenerate baselines shorter
    # than 4 characters (baseline_tokens would be 0).
    if baseline_tokens > 0:
        savings_pct = (1 - compressed_tokens / baseline_tokens) * 100
    else:
        savings_pct = 0.0
    print("=" * 60)
    print("TOKEN SAVINGS")
    print("=" * 60)
    print(f"Baseline tokens: ~{baseline_tokens:,}")
    print(f"Compressed tokens: ~{compressed_tokens:,}")
    print(f"Tokens saved: ~{baseline_tokens - compressed_tokens:,}")
    print(f"Savings: {savings_pct:.1f}%")
    print()
    print("The Question: 'What caused the outage? Error code? Fix?'")
    print()
    print("Both answers: 'payment-gateway service, error PG-5523,")
    print("               fix: Increase max_connections to 500,")
    print("               1,847 transactions affected'")
    print()
    print(f"Same answer. {savings_pct:.1f}% fewer tokens.")
def demo_langchain_integration():
    """Show LangChain integration example."""
    # The snippet is illustrative text only — it is printed, never executed.
    snippet = """
from langchain_openai import ChatOpenAI
from headroom.integrations import HeadroomChatModel
# Wrap your model - that's it!
llm = HeadroomChatModel(ChatOpenAI(model="gpt-4o"))
# Use exactly like before - compression is automatic
response = llm.invoke("Analyze these 100 logs and find the error")
"""
    divider = "=" * 60
    print()
    print(divider)
    print("LANGCHAIN INTEGRATION")
    print(divider)
    print(snippet)
def demo_proxy_mode():
    """Show proxy mode example."""
    # Shell commands shown to the user — printed as-is, never executed.
    commands = """
# Start the proxy
$ headroom proxy --port 8787
# Point Claude Code at it
$ ANTHROPIC_BASE_URL=http://localhost:8787 claude
# Point Cursor at it
$ OPENAI_BASE_URL=http://localhost:8787/v1 cursor
# Your existing code works unchanged - Headroom compresses transparently
"""
    divider = "=" * 60
    print(divider)
    print("PROXY MODE (Zero Code Changes)")
    print(divider)
    print(commands)
def main():
    """Run every demo stage in order and print a get-started footer."""
    divider = "=" * 60
    print()
    print("  HEADROOM CONTEXT OPTIMIZATION DEMO")
    print("  ===================================")
    print("  Reduce LLM costs by 50-90% with intelligent compression")
    print()
    # Run demos, threading the serialized payloads into the savings report.
    _, baseline_output = demo_without_headroom()
    _, compressed_output = demo_with_headroom()
    calculate_savings(baseline_output, compressed_output)
    demo_langchain_integration()
    demo_proxy_mode()
    print(divider)
    print("GET STARTED")
    print(divider)
    print("pip install headroom-ai[all]")
    print()
    print("GitHub: https://github.com/chopratejas/headroom")
    print("PyPI: https://pypi.org/project/headroom-ai/")
    print(divider)
# Run the full demo when executed directly as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_llm_apps/llm_optimization_tools/headroom_context_optimization/headroom_demo.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_sales_intelligence_agent_team/agent.py | from google.adk.agents import LlmAgent, SequentialAgent
from google.adk.tools import google_search
from .tools import generate_battle_card_html, generate_comparison_chart
# ============================================================================
# Stage 1: Competitor Research Agent
# ============================================================================
competitor_research_agent = LlmAgent(
name="CompetitorResearchAgent",
model="gemini-3-flash-preview",
description="Researches competitor company information using web search",
instruction="""
You are a competitive intelligence analyst researching a competitor company.
The user will specify:
- **Competitor**: The company to research (name or URL)
- **Your Product**: The product you're selling against them
Use google_search to gather comprehensive competitor intelligence:
**RESEARCH THESE AREAS:**
1. **Company Overview**
- Founded when, HQ location, company size
- Funding history and investors
- Key leadership and executives
2. **Target Market**
- Who are their ideal customers?
- What industries do they focus on?
- Company size they target (SMB, Mid-market, Enterprise)
3. **Products & Pricing**
- Main product offerings
- Pricing tiers and models
- Free trial or freemium options
4. **Recent News**
- Product launches
- Acquisitions or partnerships
- Leadership changes
5. **Customer Sentiment**
- Search G2, Capterra, TrustRadius reviews
- Common complaints and praise
- NPS or satisfaction scores if available
Be thorough and cite specific sources where possible.
""",
tools=[google_search],
output_key="competitor_profile",
)
# ============================================================================
# Stage 2: Product Feature Agent
# ============================================================================
product_feature_agent = LlmAgent(
name="ProductFeatureAgent",
model="gemini-3-flash-preview",
description="Analyzes competitor product features and capabilities",
instruction="""
You are a product analyst comparing competitor features.
COMPETITOR PROFILE:
{competitor_profile}
Use google_search to deeply analyze their product capabilities:
**ANALYZE THESE AREAS:**
1. **Core Features**
- Main functionality and capabilities
- Unique features they promote
- What problems they solve
2. **Integrations & Ecosystem**
- Native integrations
- API availability
- Marketplace/app ecosystem
3. **Technical Architecture**
- Cloud vs. on-premise options
- Mobile apps
- Security certifications (SOC2, GDPR, etc.)
4. **Pricing Details**
- Price per seat/user
- What's included in each tier
- Add-ons and hidden costs
- Contract requirements
5. **Limitations**
- Feature gaps mentioned in reviews
- Scalability concerns
- Known technical issues
Create a detailed feature inventory for comparison.
""",
tools=[google_search],
output_key="feature_analysis",
)
# ============================================================================
# Stage 3: Positioning Analyzer Agent
# ============================================================================
positioning_analyzer_agent = LlmAgent(
name="PositioningAnalyzer",
model="gemini-3-pro-preview",
description="Analyzes competitor positioning and messaging",
instruction="""
You are a marketing strategist analyzing competitor positioning.
COMPETITOR PROFILE:
{competitor_profile}
FEATURE ANALYSIS:
{feature_analysis}
Use google_search to uncover their positioning strategy:
**ANALYZE THESE AREAS:**
1. **Messaging & Taglines**
- Their homepage headline
- Key value propositions
- How they describe themselves
2. **Target Personas**
- Who do they market to?
- Job titles mentioned in marketing
- Use cases they highlight
3. **Competitive Positioning**
- How do THEY position against YOUR product?
- Comparison pages they have
- Claims they make about competitors
4. **Analyst Coverage**
- Gartner Magic Quadrant position
- Forrester Wave placement
- G2 Grid position
5. **Social Proof**
- Customer logos they showcase
- Case studies and testimonials
- Awards and recognition
Identify messaging we can counter or leverage.
""",
tools=[google_search],
output_key="positioning_intel",
)
# ============================================================================
# Stage 4: Strengths & Weaknesses Agent
# ============================================================================
swot_agent = LlmAgent(
name="StrengthsWeaknessesAgent",
model="gemini-3-pro-preview",
description="Synthesizes SWOT analysis from research",
instruction="""
You are a competitive strategist creating a SWOT analysis.
COMPETITOR PROFILE:
{competitor_profile}
FEATURE ANALYSIS:
{feature_analysis}
POSITIONING INTEL:
{positioning_intel}
**CREATE A BRUTALLY HONEST SWOT ANALYSIS:**
## Their Strengths (Where They Beat Us)
- List 5 genuine strengths
- Include evidence from reviews/market position
- Be honest about where they're better
## Their Weaknesses (Where We Beat Them)
- List 5 genuine weaknesses
- Cite specific complaints from reviews
- Identify feature gaps
## Our Advantages
- Where does OUR product win?
- What do customers love about us vs. them?
- Technical or pricing advantages
## Competitive Landmines
- Questions to ask prospects that expose their weaknesses
- Topics to bring up that favor us
- Traps to set in competitive deals
Be strategic but honest. Sales reps lose credibility if we overstate our advantages.
""",
output_key="swot_analysis",
)
# ============================================================================
# Stage 5: Objection Handler Agent
# ============================================================================
objection_handler_agent = LlmAgent(
name="ObjectionHandlerAgent",
model="gemini-3-pro-preview",
description="Creates objection handling scripts",
instruction="""
You are a sales enablement expert creating objection handling scripts.
COMPETITOR PROFILE:
{competitor_profile}
SWOT ANALYSIS:
{swot_analysis}
**CREATE OBJECTION HANDLING SCRIPTS:**
For each objection, provide:
1. **The Objection**: What the prospect says
2. **Why They Say It**: The underlying concern
3. **Your Response**: A scripted, confident response
4. **Proof Points**: Evidence to support your response
**COMMON OBJECTIONS TO ADDRESS:**
1. "We're already using [Competitor]"
2. "[Competitor] is the market leader"
3. "[Competitor] has more features"
4. "[Competitor] is cheaper"
5. "Our team already knows [Competitor]"
6. "[Competitor] integrates with our stack"
7. "We've heard [Competitor] has better support"
8. "[Competitor] is more secure/compliant"
9. "All the analysts recommend [Competitor]"
10. "We just renewed with [Competitor]"
**ALSO INCLUDE:**
## Killer Questions
Questions that expose competitor weaknesses when asked to prospects.
## Trap-Setting Phrases
Things to say early in the sales cycle that position us favorably for later.
Make responses conversational and confident, not defensive.
""",
output_key="objection_scripts",
)
# ============================================================================
# Stage 6: Battle Card Generator Agent
# ============================================================================
battle_card_generator_agent = LlmAgent(
name="BattleCardGenerator",
model="gemini-3-flash-preview",
description="Generates professional HTML battle card",
instruction="""
You create professional sales battle cards.
COMPETITOR PROFILE:
{competitor_profile}
FEATURE ANALYSIS:
{feature_analysis}
SWOT ANALYSIS:
{swot_analysis}
OBJECTION SCRIPTS:
{objection_scripts}
Use the generate_battle_card_html tool to create a professional battle card.
**PREPARE THIS DATA FOR THE TOOL:**
Compile all the research into a structured format:
1. **Quick Stats** (1-liner facts)
2. **Positioning Summary** (how to position against them)
3. **Feature Comparison** (key features, us vs. them)
4. **Their Strengths** (be honest)
5. **Their Weaknesses** (where we win)
6. **Top Objections & Responses** (quick reference)
7. **Killer Questions** (to ask prospects)
8. **Landmines** (traps to set)
Pass this compiled data to generate_battle_card_html.
The tool will create a sales-friendly HTML battle card that reps can use during calls.
""",
tools=[generate_battle_card_html],
output_key="battle_card_result",
)
# ============================================================================
# Stage 7: Comparison Chart Agent
# ============================================================================
comparison_chart_agent = LlmAgent(
name="ComparisonChartAgent",
model="gemini-3-flash-preview",
description="Creates visual comparison infographic using AI image generation",
instruction="""
You create visual comparison infographics for sales teams using AI image generation.
COMPETITOR PROFILE:
{competitor_profile}
FEATURE ANALYSIS:
{feature_analysis}
SWOT ANALYSIS:
{swot_analysis}
Use the generate_comparison_chart tool to create a visual comparison infographic.
**PREPARE COMPARISON DATA:**
Create a comprehensive comparison summary including:
1. **Overall Verdict** - Who wins overall and why
2. **Feature Scores** - List 8-10 key features with ratings:
- Feature name
- Their score (1-10)
- Our score (1-10)
- Winner indicator
3. **Key Differentiators** - Top 3 areas where we clearly win
4. **Watch Areas** - Where they have advantage
5. **Verdict Summary** - One-line recommendation
Example comparison_data format:
```
OVERALL: HubSpot leads 7-3 over Salesforce
FEATURE COMPARISON:
- Ease of Use: Them 6/10, Us 9/10 ✓
- Enterprise Features: Them 9/10, Us 7/10 ✗
- Pricing Value: Them 4/10, Us 8/10 ✓
- Integrations: Them 8/10, Us 8/10 =
- Support Quality: Them 6/10, Us 8/10 ✓
KEY WINS: Ease of use, Pricing, Support
THEIR ADVANTAGE: Enterprise features, Brand recognition
VERDICT: Recommend HubSpot for SMB/Mid-market deals
```
Pass this to generate_comparison_chart with:
- competitor_name: The competitor's name
- your_product_name: Your product's name
- comparison_data: The full comparison summary above
The tool uses Gemini's image generation to create a professional infographic.
""",
tools=[generate_comparison_chart],
output_key="chart_result",
)
# ============================================================================
# Battle Card Pipeline (SequentialAgent)
# ============================================================================
# Runs the seven stages above strictly in the listed order; each stage's
# output_key lands in shared session state for the stages that follow it.
battle_card_pipeline = SequentialAgent(
    name="BattleCardPipeline",
    description="Complete battle card pipeline: Research → Features → Positioning → SWOT → Objections → Battle Card → Chart",
    sub_agents=[
        competitor_research_agent,
        product_feature_agent,
        positioning_analyzer_agent,
        swot_agent,
        objection_handler_agent,
        battle_card_generator_agent,
        comparison_chart_agent,
    ],
)
# ============================================================================
# Root Agent (Coordinator)
# ============================================================================
root_agent = LlmAgent(
name="BattleCardAnalyst",
model="gemini-3-flash-preview",
description="AI-powered competitive intelligence analyst for sales teams",
instruction="""
You are a competitive intelligence analyst helping sales teams win against competitors.
**WHAT YOU NEED FROM THE USER:**
1. **Competitor**: The company to analyze (name or URL)
2. **Your Product**: What you're selling (so we can compare)
**EXAMPLES OF VALID REQUESTS:**
- "Create a battle card for Salesforce. We sell HubSpot."
- "Battle card against Slack - we're selling Microsoft Teams"
- "Competitive analysis of Zendesk vs our product Freshdesk"
- "Help me compete against Monday.com, I sell Asana"
**WHEN USER PROVIDES BOTH:**
→ transfer_to_agent to "BattleCardPipeline"
The pipeline will:
1. Research the competitor thoroughly
2. Analyze their product features
3. Uncover their positioning strategy
4. Create SWOT analysis
5. Generate objection handling scripts
6. Create a professional battle card
7. Generate a visual comparison chart
**IF USER ONLY PROVIDES COMPETITOR:**
Ask them: "What product are you selling against [Competitor]?"
**FOR GENERAL QUESTIONS:**
Answer questions about competitive selling, battle cards, or how you can help.
After analysis, summarize key findings and mention the generated artifacts.
""",
sub_agents=[battle_card_pipeline],
)
__all__ = ["root_agent"]
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_sales_intelligence_agent_team/agent.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_sales_intelligence_agent_team/tools.py | """Custom tools for the Battle Card Pipeline.
Provides HTML battle card generation and comparison chart creation.
"""
import logging
from pathlib import Path
from datetime import datetime
from google.adk.tools import ToolContext
from google.genai import types, Client
# Module-level logger shared by both tool functions below.
logger = logging.getLogger("BattleCardPipeline")
# Create outputs directory for generated files
# NOTE: runs at import time; generated files are saved both here and as
# ADK artifacts via tool_context.save_artifact.
OUTPUTS_DIR = Path(__file__).parent / "outputs"
OUTPUTS_DIR.mkdir(exist_ok=True)
def _strip_code_fence(text: str) -> str:
    """Remove a wrapping markdown code fence (```html ... ``` or ``` ... ```).

    Returns *text* unchanged when no closing fence exists after the opening
    one — previously `rfind("```")` would locate the *opening* fence in that
    case and slice the whole document away to an empty string.
    """
    for opener in ("```html", "```"):
        if opener in text:
            start = text.find(opener) + len(opener)
            end = text.rfind("```")
            if end > start:
                return text[start:end].strip()
            return text
    return text


async def generate_battle_card_html(
    battle_card_data: str,
    tool_context: ToolContext
) -> dict:
    """Generate a professional HTML battle card for sales teams.

    Args:
        battle_card_data: Compiled competitive intelligence data
        tool_context: ADK tool context for artifact saving

    Returns:
        dict with status and artifact info ("status" is "success" or "error")
    """
    current_date = datetime.now().strftime("%B %d, %Y")
    prompt = f"""Generate a professional sales battle card in HTML format.
**DATE: {current_date}**
This is a competitive battle card for sales reps to use during deals.
Style it for SALES TEAMS with:
- Clean, scannable design (reps glance at this during calls)
- Color coding: GREEN for our advantages, RED for competitor strengths
- Collapsible sections for detailed content
- Quick-reference format at the top
- Dark blue (#1e3a5f) and orange (#f97316) color scheme
- Print-friendly layout
COMPETITIVE INTELLIGENCE DATA:
{battle_card_data}
**REQUIRED SECTIONS:**
1. **Header** - Competitor name, logo placeholder, last updated date
2. **Quick Stats** - 5-6 one-liner facts about the competitor
3. **At a Glance** - 3 columns: They Win | We Win | Toss-up
4. **Feature Comparison** - Table with checkmarks/X marks
5. **Positioning** - How to position against them (2-3 sentences)
6. **Their Strengths** - Honest list with red indicators
7. **Their Weaknesses** - List with green indicators (our opportunities)
8. **Objection Handling** - Top 5 objections with quick responses
9. **Killer Questions** - Questions to ask prospects
10. **Landmines** - Traps to set in competitive deals
Make it visually impressive but FAST TO SCAN. Sales reps have seconds, not minutes.
Generate complete, valid HTML with embedded CSS and JavaScript for collapsible sections."""
    try:
        client = Client()
        response = await client.aio.models.generate_content(
            model="gemini-3-flash-preview",
            contents=prompt,
        )
        # Models sometimes wrap HTML output in a markdown fence; strip it
        # safely (no-op when the fence is malformed/unclosed).
        html_content = _strip_code_fence(response.text)
        # Save as ADK artifact (timestamped so repeated runs don't collide).
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        artifact_name = f"battle_card_{timestamp}.html"
        html_artifact = types.Part.from_bytes(
            data=html_content.encode('utf-8'),
            mime_type="text/html"
        )
        version = await tool_context.save_artifact(filename=artifact_name, artifact=html_artifact)
        logger.info(f"Saved battle card artifact: {artifact_name} (version {version})")
        # Also save to outputs folder for direct filesystem access.
        filepath = OUTPUTS_DIR / artifact_name
        filepath.write_text(html_content, encoding='utf-8')
        return {
            "status": "success",
            "message": f"Battle card saved as '{artifact_name}' - view in Artifacts tab",
            "artifact": artifact_name,
            "version": version
        }
    except Exception as e:
        # Report failures back to the calling agent instead of raising, so the
        # pipeline can surface the error message to the user.
        logger.error(f"Error generating battle card: {e}")
        return {"status": "error", "message": str(e)}
async def generate_comparison_chart(
    competitor_name: str,
    your_product_name: str,
    comparison_data: str,
    tool_context: ToolContext
) -> dict:
    """Generate a visual comparison infographic using Gemini image generation.

    Args:
        competitor_name: Name of the competitor
        your_product_name: Name of your product
        comparison_data: Feature comparison data with scores and highlights
        tool_context: ADK tool context for artifact saving

    Returns:
        dict with status and artifact info. "status" is "success" when an
        image was produced, "partial" when the model returned only text,
        or "error" on exception.
    """
    prompt = f"""Create a professional competitive comparison infographic.
**COMPARISON: {your_product_name} vs {competitor_name}**
Style: Clean, modern, sales-ready infographic
Colors:
- Green (#22c55e) for {your_product_name} (your product)
- Red (#ef4444) for {competitor_name} (competitor)
- Dark blue (#1e3a5f) for headers and text
- White background
**DATA TO VISUALIZE:**
{comparison_data}
**INFOGRAPHIC LAYOUT:**
1. **Header** - "{your_product_name} vs {competitor_name}" prominently at top
2. **Score Overview** - Large visual showing overall winner
3. **Feature Comparison** - Side-by-side bars or ratings for each feature
4. **Key Differentiators** - Icons highlighting where {your_product_name} wins
5. **Bottom Line** - Clear verdict/recommendation badge
**DESIGN REQUIREMENTS:**
- Professional, enterprise-ready aesthetic
- Easy to read at a glance
- Color-coded clearly (green = us, red = them)
- Include checkmarks for wins, X marks for losses
- Make it look like a Gartner or Forrester comparison graphic
- Data-rich but not cluttered
Generate a visually compelling infographic that sales reps can share with prospects."""
    try:
        client = Client()
        # Request both modalities; the model may answer with text only.
        response = await client.aio.models.generate_content(
            model="gemini-3-pro-image-preview",
            contents=prompt,
            config=types.GenerateContentConfig(
                response_modalities=["TEXT", "IMAGE"]
            )
        )
        # Look for image in response — first image part wins; only the first
        # candidate is inspected.
        for part in response.candidates[0].content.parts:
            if part.inline_data and part.inline_data.mime_type.startswith("image/"):
                image_bytes = part.inline_data.data
                mime_type = part.inline_data.mime_type
                # Save as ADK artifact (timestamped filename, extension from MIME type)
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                ext = "png" if "png" in mime_type else "jpg"
                artifact_name = f"comparison_infographic_{timestamp}.{ext}"
                image_artifact = types.Part.from_bytes(data=image_bytes, mime_type=mime_type)
                version = await tool_context.save_artifact(filename=artifact_name, artifact=image_artifact)
                logger.info(f"Saved comparison infographic: {artifact_name} (version {version})")
                # Also save to outputs folder for direct filesystem access
                filepath = OUTPUTS_DIR / artifact_name
                filepath.write_bytes(image_bytes)
                return {
                    "status": "success",
                    "message": f"Comparison infographic saved as '{artifact_name}' - view in Artifacts tab",
                    "artifact": artifact_name,
                    "version": version,
                    "comparison": f"{your_product_name} vs {competitor_name}"
                }
        # No image part found: fall back to whatever text the model returned.
        return {
            "status": "partial",
            "message": "Image generation not available, text description provided",
            "description": response.text if response.text else "No content generated"
        }
    except Exception as e:
        # Report failures to the calling agent instead of raising.
        logger.error(f"Error generating comparison infographic: {e}")
        return {"status": "error", "message": str(e)}
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_sales_intelligence_agent_team/tools.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/single_agent_apps/research_agent_gemini_interaction_api/research_planner_executor_agent.py | """Research Planner using Gemini Interactions API - demonstrates stateful conversations, model mixing, and background execution."""
import streamlit as st, time, re
from google import genai
def get_text(outputs):
    """Join the non-empty ``.text`` of each output with newlines; "" when none."""
    pieces = []
    for item in outputs or []:
        # Skip outputs without a text attribute and outputs with falsy text.
        if hasattr(item, 'text') and item.text:
            pieces.append(item.text)
    return "\n".join(pieces) or ""
def parse_tasks(text):
    """Extract numbered plan items ("1. ...", "2) ...", "3- ...") from *text*.

    Returns a list of {"num": str, "digit prefix", "text": one-line task body} dicts.
    An item runs until the next numbered line, a blank line, or end of text;
    embedded newlines are collapsed to spaces.
    """
    item_re = re.compile(
        r'^(\d+)[\.\)\-]\s*(.+?)(?=\n\d+[\.\)\-]|\n\n|\Z)',
        re.MULTILINE | re.DOTALL,
    )
    tasks = []
    for match in item_re.finditer(text):
        body = match.group(2).strip().replace('\n', ' ')
        tasks.append({"num": match.group(1), "text": body})
    return tasks
def wait_for_completion(client, iid, timeout=300):
    """Poll a background interaction until it leaves "in_progress" or times out.

    Args:
        client: genai.Client used to fetch interaction status.
        iid: Id of the interaction to poll.
        timeout: Maximum seconds to wait (default 300); polls every ~3s.

    Returns:
        The last fetched interaction object — NOTE: may still be
        "in_progress" if the timeout elapsed.
    """
    # Streamlit progress bar + status placeholder, updated on every poll.
    progress, status, elapsed = st.progress(0), st.empty(), 0
    while elapsed < timeout:
        interaction = client.interactions.get(iid)
        if interaction.status != "in_progress": progress.progress(100); return interaction
        # Bar is capped at 90% until done; `elapsed` is approximate (it ignores API call latency).
        elapsed += 3; progress.progress(min(90, int(elapsed/timeout*100))); status.text(f"⏳ {elapsed}s..."); time.sleep(3)
    return client.interactions.get(iid)
# Setup
st.set_page_config(page_title="Research Planner", page_icon="🔬", layout="wide")
st.title("🔬 AI Research Planner & Executor Agent (Gemini Interactions API) ✨")
# Initialize all session-state slots once; "tasks" is a list, the rest are scalars.
for k in ["plan_id", "plan_text", "tasks", "research_id", "research_text", "synthesis_text", "infographic"]:
    if k not in st.session_state: st.session_state[k] = [] if k == "tasks" else None
with st.sidebar:
    api_key = st.text_input("🔑 Google API Key", type="password")
    # Reset clears every session-state slot back to its initial value.
    if st.button("Reset"): [setattr(st.session_state, k, [] if k == "tasks" else None) for k in ["plan_id", "plan_text", "tasks", "research_id", "research_text", "synthesis_text", "infographic"]]; st.rerun()
    st.markdown("""
### How It Works
1. **Plan** → Gemini 3 Flash creates research tasks
2. **Select** → Choose which tasks to research
3. **Research** → Deep Research Agent investigates
4. **Synthesize** → Gemini 3 Pro writes report + TL;DR infographic
Each phase chains via `previous_interaction_id` for context.
""")
client = genai.Client(api_key=api_key) if api_key else None
if not client: st.info("👆 Enter API key to start"); st.stop()
# Phase 1: Plan — Flash model drafts a numbered research plan (with search).
research_goal = st.text_area("📝 Research Goal", placeholder="e.g., Research B2B HR SaaS market in Germany")
if st.button("📋 Generate Plan", disabled=not research_goal, type="primary"):
    with st.spinner("Planning..."):
        try:
            i = client.interactions.create(model="gemini-3-flash-preview", input=f"Create a numbered research plan for: {research_goal}\n\nFormat: 1. [Task] - [Details]\n\nInclude 5-8 specific tasks.", tools=[{"type": "google_search"}], store=True)
            # store=True keeps the interaction server-side so later phases can chain to it.
            st.session_state.plan_id, st.session_state.plan_text, st.session_state.tasks = i.id, get_text(i.outputs), parse_tasks(get_text(i.outputs))
        except Exception as e: st.error(f"Error: {e}")
# Phase 2: Select & Research — user picks tasks; a background Deep Research
# agent investigates them, chained to the plan via previous_interaction_id.
if st.session_state.plan_text:
    st.divider(); st.subheader("🔍 Select Tasks & Research")
    selected = [f"{t['num']}. {t['text']}" for t in st.session_state.tasks if st.checkbox(f"**{t['num']}.** {t['text']}", True, key=f"t{t['num']}")]
    st.caption(f"✅ {len(selected)}/{len(st.session_state.tasks)} selected")
    if st.button("🚀 Start Deep Research", type="primary", disabled=not selected):
        with st.spinner("Researching (2-5 min)..."):
            try:
                # background=True returns immediately; wait_for_completion polls.
                i = client.interactions.create(agent="deep-research-pro-preview-12-2025", input=f"Research these tasks thoroughly with sources:\n\n" + "\n\n".join(selected), previous_interaction_id=st.session_state.plan_id, background=True, store=True)
                i = wait_for_completion(client, i.id)
                st.session_state.research_id, st.session_state.research_text = i.id, get_text(i.outputs) or f"Status: {i.status}"
                st.rerun()
            except Exception as e: st.error(f"Error: {e}")
if st.session_state.research_text:
    st.divider(); st.subheader("📄 Research Results"); st.markdown(st.session_state.research_text)
# Phase 3: Synthesis + Infographic — Pro model writes the report (chained to
# the research interaction), then an image model renders a TL;DR infographic.
if st.session_state.research_id:
    if st.button("📊 Generate Executive Report", type="primary"):
        with st.spinner("Synthesizing report..."):
            try:
                i = client.interactions.create(model="gemini-3-pro-preview", input=f"Create executive report with Summary, Findings, Recommendations, Risks:\n\n{st.session_state.research_text}", previous_interaction_id=st.session_state.research_id, store=True)
                st.session_state.synthesis_text = get_text(i.outputs)
            except Exception as e: st.error(f"Error: {e}"); st.stop()
        with st.spinner("Creating TL;DR infographic..."):
            try:
                response = client.models.generate_content(
                    model="gemini-3-pro-image-preview",
                    contents=f"Create a whiteboard summary infographic for the following: {st.session_state.synthesis_text}"
                )
                # Keep the first inline image part found; infographic failure is non-fatal.
                for part in response.candidates[0].content.parts:
                    if hasattr(part, 'inline_data') and part.inline_data:
                        st.session_state.infographic = part.inline_data.data
                        break
            except Exception as e: st.warning(f"Infographic error: {e}")
        st.rerun()
if st.session_state.synthesis_text:
    st.divider(); st.markdown("## 📊 Executive Report")
    # TL;DR Infographic at the top
    if st.session_state.infographic:
        st.markdown("### 🎨 TL;DR")
        st.image(st.session_state.infographic, use_container_width=True)
        st.divider()
    st.markdown(st.session_state.synthesis_text)
    st.download_button("📥 Download Report", st.session_state.synthesis_text, "research_report.md", "text/markdown")
st.divider(); st.caption("[Gemini Interactions API](https://ai.google.dev/gemini-api/docs/interactions)")
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/single_agent_apps/research_agent_gemini_interaction_api/research_planner_executor_agent.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/llm_optimization_tools/toonify_token_optimization/quick_test.py | """
Quick test to verify Toonify installation and basic functionality
Run this to quickly see the token savings in action!
"""
import json
from toon import encode, decode
def quick_test():
    """Quick demonstration of Toonify savings."""
    banner = "=" * 60
    print("🎯 TOONIFY QUICK TEST")
    print(banner)

    # Sample data: a small product catalog.
    catalog = {
        "products": [
            {"id": 1, "name": "Laptop", "price": 1299, "stock": 45},
            {"id": 2, "name": "Mouse", "price": 79, "stock": 120},
            {"id": 3, "name": "Keyboard", "price": 89, "stock": 85},
            {"id": 4, "name": "Monitor", "price": 399, "stock": 32},
            {"id": 5, "name": "Webcam", "price": 129, "stock": 67}
        ]
    }

    # Serialize both ways and measure the byte counts.
    as_json = json.dumps(catalog, indent=2)
    as_toon = encode(catalog)
    bytes_json = len(as_json)
    bytes_toon = len(as_toon)
    pct_saved = (bytes_json - bytes_toon) / bytes_json * 100

    # Show both renderings and the savings.
    print(f"\n📄 JSON ({bytes_json} bytes):")
    print(as_json)
    print(f"\n🎯 TOON ({bytes_toon} bytes):")
    print(as_toon)
    print(f"\n💰 Size Reduction: {pct_saved:.1f}%")
    print(f" Saved: {bytes_json - bytes_toon} bytes")

    # Decode back and confirm we recover the original structure.
    if decode(as_toon) != catalog:
        print("\n❌ Roundtrip verification: FAILED")
        return False
    print("\n✅ Roundtrip verification: PASSED")

    print("\n" + banner)
    print("✨ Installation verified! Run toonify_demo.py for full demo.")
    print(banner)
    return True
# Script entry point: exit status 0 on success, 1 on any failure.
if __name__ == "__main__":
    try:
        success = quick_test()
        # NOTE(review): prefer sys.exit over the site-provided exit() builtin.
        exit(0 if success else 1)
    except ImportError as e:
        # Most likely the `toon` package is missing.
        print(f"\n❌ Error: {e}")
        print("\n💡 Install dependencies with: pip install -r requirements.txt")
        exit(1)
    except Exception as e:
        print(f"\n❌ Unexpected error: {e}")
        exit(1)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_llm_apps/llm_optimization_tools/toonify_token_optimization/quick_test.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/llm_optimization_tools/toonify_token_optimization/toonify_app.py | """
Toonify Interactive Streamlit App
Visualize token savings and test TOON format with your own data
"""
import streamlit as st
import json
from toon import encode, decode
import tiktoken
import pandas as pd
def count_tokens(text: str, model: str = "gpt-4") -> int:
    """Count tokens in text."""
    # Resolve the model-specific tokenizer, then measure the encoded length.
    return len(tiktoken.encoding_for_model(model).encode(text))
def calculate_cost(tokens: int, model: str = "gpt-4") -> float:
    """Calculate API cost based on token count."""
    # Per-token input price, derived from the published per-1K-token rates.
    rate = {
        "gpt-4": 0.03 / 1000,  # $0.03 per 1K tokens
        "gpt-4-turbo": 0.01 / 1000,
        "gpt-3.5-turbo": 0.0015 / 1000,
        "claude-3-opus": 0.015 / 1000,
        "claude-3-sonnet": 0.003 / 1000,
    }.get(model, 0.03 / 1000)  # unknown models fall back to GPT-4 pricing
    return tokens * rate
def main() -> None:
    """Render the Streamlit UI: comparison, custom-data, and benchmark tabs."""
    # --- Page chrome -------------------------------------------------------
    st.set_page_config(
        page_title="Toonify Token Optimizer",
        page_icon="🎯",
        layout="wide"
    )
    st.title("🎯 Toonify Token Optimization")
    st.markdown("""
    Reduce your LLM API costs by **30-60%** using TOON format for structured data!
    [GitHub](https://github.com/ScrapeGraphAI/toonify) |
    [Documentation](https://docs.scrapegraphai.com/services/toonify)
    """)
    # Sidebar: global settings shared by all tabs.
    with st.sidebar:
        st.header("⚙️ Settings")
        model = st.selectbox(
            "LLM Model",
            ["gpt-4", "gpt-4-turbo", "gpt-3.5-turbo", "claude-3-opus", "claude-3-sonnet"],
            help="Select model for token counting and cost calculation"
        )
        delimiter = st.selectbox(
            "TOON Delimiter",
            ["comma", "tab", "pipe"],
            help="Choose delimiter for array elements"
        )
        key_folding = st.selectbox(
            "Key Folding",
            ["off", "safe"],
            help="Collapse nested single-key chains into dotted paths"
        )
        st.markdown("---")
        st.markdown("### 💡 Quick Tips")
        st.info("""
        **Best for:**
        - Tabular data
        - Product catalogs
        - Survey responses
        - Analytics data
        **Avoid for:**
        - Highly nested data
        - Irregular structures
        """)
    # Main content
    tab1, tab2, tab3 = st.tabs(["📊 Comparison", "✍️ Custom Data", "📈 Benchmark"])
    with tab1:
        st.header("JSON vs TOON Comparison")
        # Example data selector
        example = st.selectbox(
            "Choose example dataset",
            [
                "E-commerce Products",
                "Customer Orders",
                "Survey Responses",
                "Analytics Data"
            ]
        )
        # Load example data — canned datasets keyed by the selector label.
        examples = {
            "E-commerce Products": {
                "products": [
                    {"id": 1, "name": "Laptop Pro", "price": 1299, "stock": 45, "rating": 4.5},
                    {"id": 2, "name": "Magic Mouse", "price": 79, "stock": 120, "rating": 4.2},
                    {"id": 3, "name": "USB-C Cable", "price": 19, "stock": 350, "rating": 4.8},
                    {"id": 4, "name": "Keyboard", "price": 89, "stock": 85, "rating": 4.6},
                    {"id": 5, "name": "Monitor Stand", "price": 45, "stock": 60, "rating": 4.3}
                ]
            },
            "Customer Orders": {
                "orders": [
                    {"order_id": "ORD001", "customer": "Alice", "total": 299.99, "status": "shipped"},
                    {"order_id": "ORD002", "customer": "Bob", "total": 149.50, "status": "processing"},
                    {"order_id": "ORD003", "customer": "Charlie", "total": 449.99, "status": "delivered"}
                ]
            },
            "Survey Responses": {
                "responses": [
                    {"id": 1, "age": 25, "satisfaction": 4, "recommend": True, "comment": "Great service!"},
                    {"id": 2, "age": 34, "satisfaction": 5, "recommend": True, "comment": "Excellent!"},
                    {"id": 3, "age": 42, "satisfaction": 3, "recommend": False, "comment": "Could be better"}
                ]
            },
            "Analytics Data": {
                "pageviews": [
                    {"page": "/home", "views": 1523, "avg_time": 45, "bounce_rate": 0.32},
                    {"page": "/products", "views": 892, "avg_time": 120, "bounce_rate": 0.45},
                    {"page": "/about", "views": 234, "avg_time": 60, "bounce_rate": 0.28}
                ]
            }
        }
        data = examples[example]
        # Convert formats
        json_str = json.dumps(data, indent=2)
        toon_options = {
            'delimiter': delimiter,
            'key_folding': key_folding
        }
        toon_str = encode(data, toon_options)
        # Calculate metrics — byte sizes use UTF-8, token counts use tiktoken.
        json_size = len(json_str.encode('utf-8'))
        toon_size = len(toon_str.encode('utf-8'))
        json_tokens = count_tokens(json_str, model)
        toon_tokens = count_tokens(toon_str, model)
        size_reduction = ((json_size - toon_size) / json_size) * 100
        token_reduction = ((json_tokens - toon_tokens) / json_tokens) * 100
        json_cost = calculate_cost(json_tokens, model)
        toon_cost = calculate_cost(toon_tokens, model)
        cost_savings = json_cost - toon_cost
        # Display metrics
        col1, col2, col3, col4 = st.columns(4)
        with col1:
            st.metric("Size Reduction", f"{size_reduction:.1f}%")
        with col2:
            st.metric("Token Reduction", f"{token_reduction:.1f}%")
        with col3:
            st.metric("Cost per Call", f"${toon_cost:.6f}", f"-${cost_savings:.6f}")
        with col4:
            savings_1k = cost_savings * 1000
            st.metric("Savings per 1K calls", f"${savings_1k:.2f}")
        # Side-by-side comparison
        col1, col2 = st.columns(2)
        with col1:
            st.subheader("📄 JSON Format")
            st.code(json_str, language="json")
            st.caption(f"Size: {json_size} bytes | Tokens: {json_tokens}")
        with col2:
            st.subheader("🎯 TOON Format")
            st.code(toon_str, language="text")
            st.caption(f"Size: {toon_size} bytes | Tokens: {toon_tokens}")
        # Cost projection — linear extrapolation of per-call cost.
        st.subheader("💰 Cost Savings Projection")
        calls = [100, 1_000, 10_000, 100_000, 1_000_000]
        json_costs = [json_cost * n for n in calls]
        toon_costs = [toon_cost * n for n in calls]
        savings = [json_costs[i] - toon_costs[i] for i in range(len(calls))]
        df = pd.DataFrame({
            "API Calls": [f"{n:,}" for n in calls],
            "JSON Cost": [f"${c:.2f}" for c in json_costs],
            "TOON Cost": [f"${c:.2f}" for c in toon_costs],
            "Savings": [f"${s:.2f}" for s in savings],
            "Savings %": [f"{token_reduction:.1f}%" for _ in calls]
        })
        st.dataframe(df, use_container_width=True)
    with tab2:
        st.header("Test Your Own Data")
        st.markdown("Paste your JSON data below to see how much you can save:")
        user_json = st.text_area(
            "JSON Data",
            value='{\n "items": [\n {"id": 1, "name": "Example", "value": 100}\n ]\n}',
            height=300
        )
        if st.button("🎯 Convert to TOON"):
            try:
                # Parse JSON
                data = json.loads(user_json)
                # Convert to TOON
                toon_options = {
                    'delimiter': delimiter,
                    'key_folding': key_folding
                }
                toon_str = encode(data, toon_options)
                # Calculate savings
                # NOTE(review): divides by json_size/json_tokens — zero only for
                # empty input, which json.loads would already have rejected.
                json_size = len(user_json.encode('utf-8'))
                toon_size = len(toon_str.encode('utf-8'))
                json_tokens = count_tokens(user_json, model)
                toon_tokens = count_tokens(toon_str, model)
                size_reduction = ((json_size - toon_size) / json_size) * 100
                token_reduction = ((json_tokens - toon_tokens) / json_tokens) * 100
                # Display results
                st.success("✅ Conversion successful!")
                col1, col2, col3 = st.columns(3)
                with col1:
                    st.metric("Size Reduction", f"{size_reduction:.1f}%")
                with col2:
                    st.metric("Token Reduction", f"{token_reduction:.1f}%")
                with col3:
                    cost_savings = calculate_cost(json_tokens - toon_tokens, model)
                    st.metric("Savings per call", f"${cost_savings:.6f}")
                st.subheader("🎯 TOON Output")
                st.code(toon_str, language="text")
                # Verify roundtrip
                decoded = decode(toon_str)
                if decoded == data:
                    st.success("✅ Roundtrip verification passed!")
                else:
                    st.warning("⚠️ Roundtrip verification failed")
            except json.JSONDecodeError as e:
                st.error(f"❌ Invalid JSON: {e}")
            except Exception as e:
                st.error(f"❌ Error: {e}")
    with tab3:
        st.header("📈 Format Benchmark")
        st.markdown("""
        Based on benchmarks across 50 real-world datasets:
        """)
        # Benchmark stats — static headline numbers, not computed here.
        col1, col2, col3 = st.columns(3)
        with col1:
            st.metric("Avg Size Reduction", "63.9%")
        with col2:
            st.metric("Avg Token Reduction", "54.1%")
        with col3:
            st.metric("Best Case", "73.4%")
        # Data type performance
        st.subheader("Performance by Data Type")
        performance_data = {
            "Data Type": ["Tabular", "E-commerce", "Analytics", "Surveys", "Mixed"],
            "Token Reduction": [73.4, 68.2, 65.1, 61.5, 48.3],
            "Use Case": ["✅ Excellent", "✅ Excellent", "✅ Great", "✅ Great", "✅ Good"]
        }
        df = pd.DataFrame(performance_data)
        st.dataframe(df, use_container_width=True)
        st.info("""
        **💡 Optimization Tips:**
        - Use TOON for uniform, structured data
        - Enable key folding for deeply nested objects
        - Choose appropriate delimiter based on your data
        - Test with your actual data for best results
        """)
    # Footer links shown below the tabs.
    st.markdown("---")
    st.markdown("""
    ### 🔗 Learn More
    - [GitHub Repository](https://github.com/ScrapeGraphAI/toonify)
    - [Documentation](https://docs.scrapegraphai.com/services/toonify)
    - [Format Specification](https://github.com/toon-format/toon)
    """)
# Launch the Streamlit app when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_llm_apps/llm_optimization_tools/toonify_token_optimization/toonify_app.py",
"license": "Apache License 2.0",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/llm_optimization_tools/toonify_token_optimization/toonify_demo.py | """
Toonify Token Optimization Demo
Demonstrates how to reduce LLM API costs by 30-60% using TOON format
"""
import json
from toon import encode, decode
import tiktoken
from openai import OpenAI
from anthropic import Anthropic
import os
def count_tokens(text: str, model: str = "gpt-4") -> int:
    """Count the number of tokens in a text string."""
    # Look up the tokenizer registered for this model name.
    tokenizer = tiktoken.encoding_for_model(model)
    return len(tokenizer.encode(text))
def format_comparison_demo():
    """Compare JSON vs TOON format sizes and token counts.

    Prints a side-by-side comparison for a sample product catalog and
    returns ``(toon_str, products_data)`` so the caller can verify the
    encode/decode roundtrip.
    """
    print("=" * 80)
    print("🎯 TOONIFY TOKEN OPTIMIZATION DEMO")
    print("=" * 80)
    # Example: E-commerce product catalog
    products_data = {
        "products": [
            {
                "id": 101,
                "name": "Laptop Pro 15",
                "category": "Electronics",
                "price": 1299.99,
                "stock": 45,
                "rating": 4.5
            },
            {
                "id": 102,
                "name": "Magic Mouse",
                "category": "Electronics",
                "price": 79.99,
                "stock": 120,
                "rating": 4.2
            },
            {
                "id": 103,
                "name": "USB-C Cable",
                "category": "Accessories",
                "price": 19.99,
                "stock": 350,
                "rating": 4.8
            },
            {
                "id": 104,
                "name": "Wireless Keyboard",
                "category": "Electronics",
                "price": 89.99,
                "stock": 85,
                "rating": 4.6
            },
            {
                "id": 105,
                "name": "Monitor Stand",
                "category": "Accessories",
                "price": 45.99,
                "stock": 60,
                "rating": 4.3
            }
        ]
    }
    # Convert to JSON
    json_str = json.dumps(products_data, indent=2)
    json_size = len(json_str.encode('utf-8'))
    json_tokens = count_tokens(json_str)
    # Convert to TOON
    toon_str = encode(products_data)
    toon_size = len(toon_str.encode('utf-8'))
    toon_tokens = count_tokens(toon_str)
    # Calculate savings as percentages relative to the JSON baseline.
    size_reduction = ((json_size - toon_size) / json_size) * 100
    token_reduction = ((json_tokens - toon_tokens) / json_tokens) * 100
    print("\n📊 FORMAT COMPARISON")
    print("-" * 80)
    print("\n📄 JSON Format:")
    print(json_str)
    print(f"\nSize: {json_size} bytes")
    print(f"Tokens: {json_tokens}")
    print("\n" + "=" * 80)
    print("\n🎯 TOON Format:")
    print(toon_str)
    print(f"\nSize: {toon_size} bytes")
    print(f"Tokens: {toon_tokens}")
    print("\n" + "=" * 80)
    print("\n💰 SAVINGS")
    print("-" * 80)
    print(f"Size Reduction: {size_reduction:.1f}%")
    print(f"Token Reduction: {token_reduction:.1f}%")
    # Calculate cost savings
    # GPT-4 pricing: $0.03 per 1K tokens (input)
    cost_per_token = 0.03 / 1000
    json_cost = json_tokens * cost_per_token
    toon_cost = toon_tokens * cost_per_token
    savings_per_call = json_cost - toon_cost
    print(f"\n💵 Cost per API call:")
    print(f" JSON: ${json_cost:.6f}")
    print(f" TOON: ${toon_cost:.6f}")
    print(f" Savings: ${savings_per_call:.6f} ({token_reduction:.1f}%)")
    print(f"\n📈 Projected savings:")
    print(f" Per 1,000 calls: ${savings_per_call * 1000:.2f}")
    print(f" Per 1M calls: ${savings_per_call * 1_000_000:.2f}")
    print("\n" + "=" * 80)
    return toon_str, products_data
def llm_integration_demo() -> None:
    """Demonstrate using TOON format with LLM APIs.

    Sends TOON-encoded order data to OpenAI and/or Anthropic when the
    corresponding API-key environment variables are set; otherwise prints
    a hint and skips that provider. Best-effort: API errors are caught
    and reported, not raised.
    """
    print("\n🤖 LLM INTEGRATION DEMO")
    print("=" * 80)
    # Create sample data
    customer_orders = {
        "orders": [
            {"order_id": "ORD001", "customer": "Alice", "total": 299.99, "status": "shipped"},
            {"order_id": "ORD002", "customer": "Bob", "total": 149.50, "status": "processing"},
            {"order_id": "ORD003", "customer": "Charlie", "total": 449.99, "status": "delivered"},
            {"order_id": "ORD004", "customer": "Diana", "total": 89.99, "status": "pending"},
        ]
    }
    # Convert to TOON (JSON kept only for the token-savings comparison below).
    toon_data = encode(customer_orders)
    json_data = json.dumps(customer_orders, indent=2)
    print("\n📦 Data to analyze:")
    print(toon_data)
    # Check if API keys are available
    openai_key = os.getenv("OPENAI_API_KEY")
    anthropic_key = os.getenv("ANTHROPIC_API_KEY")
    if openai_key:
        try:
            print("\n🔵 Testing with OpenAI GPT-4...")
            client = OpenAI(api_key=openai_key)
            prompt = f"""Analyze these customer orders and provide a brief summary:
{toon_data}
Provide: 1) Total revenue, 2) Orders by status, 3) Average order value"""
            response = client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": prompt}],
                max_tokens=200
            )
            print("\n✅ GPT-4 Response:")
            print(response.choices[0].message.content)
            # Show token usage
            print(f"\n📊 Token Usage:")
            print(f" Input tokens: {response.usage.prompt_tokens}")
            print(f" Output tokens: {response.usage.completion_tokens}")
            print(f" Total tokens: {response.usage.total_tokens}")
            # Compare with JSON: rebuild the same prompt with the JSON payload
            # and count its tokens locally.
            json_tokens = count_tokens(prompt.replace(toon_data, json_data))
            toon_tokens = response.usage.prompt_tokens
            savings = ((json_tokens - toon_tokens) / json_tokens) * 100
            print(f"\n💰 Token Savings: {savings:.1f}% (vs JSON)")
        except Exception as e:
            print(f"❌ OpenAI Error: {e}")
    else:
        print("\n⚠️ Set OPENAI_API_KEY to test with GPT-4")
    if anthropic_key:
        try:
            print("\n🟣 Testing with Anthropic Claude...")
            client = Anthropic(api_key=anthropic_key)
            response = client.messages.create(
                model="claude-3-5-sonnet-20241022",
                max_tokens=200,
                messages=[{
                    "role": "user",
                    "content": f"""Analyze these customer orders and provide a brief summary:
{toon_data}
Provide: 1) Total revenue, 2) Orders by status, 3) Average order value"""
                }]
            )
            print("\n✅ Claude Response:")
            print(response.content[0].text)
            # Show token usage
            print(f"\n📊 Token Usage:")
            print(f" Input tokens: {response.usage.input_tokens}")
            print(f" Output tokens: {response.usage.output_tokens}")
        except Exception as e:
            print(f"❌ Anthropic Error: {e}")
    else:
        print("\n⚠️ Set ANTHROPIC_API_KEY to test with Claude")
def advanced_features_demo() -> None:
    """Demonstrate advanced TOON features: key folding and custom delimiters."""
    print("\n⚙️ ADVANCED FEATURES")
    print("=" * 80)
    # Key folding example — a deep single-key chain that folding can collapse.
    nested_data = {
        'api': {
            'response': {
                'product': {
                    'title': 'Wireless Keyboard',
                    'specs': {
                        'battery': '6 months',
                        'connectivity': 'Bluetooth 5.0'
                    }
                }
            }
        }
    }
    print("\n1️⃣ Key Folding (collapse nested paths)")
    print("-" * 80)
    # Without key folding
    normal_toon = encode(nested_data)
    print("Without key folding:")
    print(normal_toon)
    # With key folding
    folded_toon = encode(nested_data, {'key_folding': 'safe'})
    print("\nWith key folding:")
    print(folded_toon)
    print(f"\nSavings: {len(normal_toon)} → {len(folded_toon)} bytes")
    # Custom delimiters — useful when values themselves contain commas.
    print("\n2️⃣ Custom Delimiters")
    print("-" * 80)
    data = {
        "items": [
            ["Product A", "Description with, commas", 29.99],
            ["Product B", "Another, description", 39.99]
        ]
    }
    print("Tab delimiter (for data with commas):")
    tab_toon = encode(data, {'delimiter': 'tab'})
    print(tab_toon)
    print("\nPipe delimiter:")
    pipe_toon = encode(data, {'delimiter': 'pipe'})
    print(pipe_toon)
def main():
    """Run all demos."""
    # Compare the two formats first; keep the outputs for verification.
    toon_payload, source_data = format_comparison_demo()

    # Prove the encoding is lossless before running the other demos.
    decoded = decode(toon_payload)
    assert decoded == source_data, "Roundtrip failed!"
    print("\n✅ Roundtrip verification: PASSED")

    llm_integration_demo()    # optional — requires API keys
    advanced_features_demo()  # key folding + custom delimiters

    rule = "=" * 80
    print("\n" + rule)
    print("🎉 Demo completed!")
    print("💡 Set OPENAI_API_KEY or ANTHROPIC_API_KEY to test LLM integration")
    print("🔗 Learn more: https://github.com/ScrapeGraphAI/toonify")
    print(rule)
# Run the full demo suite when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_llm_apps/llm_optimization_tools/toonify_token_optimization/toonify_demo.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/chat_with_X_tutorials/chat_with_youtube_videos/test_session_state.py | #!/usr/bin/env python3
"""
Test script to verify the session state improvements work correctly
"""
# Simulate the key functions from the updated app
import tempfile
from typing import Tuple
# Mock streamlit session state for testing
class MockSessionState:
    """Minimal stand-in for Streamlit's st.session_state.

    Mirrors the attributes the real app stores, so the caching logic can
    be exercised without importing Streamlit.
    """
    def __init__(self):
        self.app = None                   # application handle (not exercised by this test)
        self.current_video_url = None     # URL whose transcript is currently loaded
        self.transcript_loaded = False    # True once a transcript has been stored
        self.transcript_text = None       # raw transcript text
        self.word_count = 0               # word count of transcript_text
        self.chat_history = []            # list of (question, answer) tuples
def test_session_state_logic() -> None:
    """Test the session state logic without Streamlit.

    Walks through five scenarios against MockSessionState: initial state,
    first load, repeated questions, switching videos (history cleared),
    and re-submitting the same URL (no reload). Results are printed, not
    asserted.
    """
    print("🧪 Testing Session State Logic")
    print("=" * 40)
    # Mock session state
    session_state = MockSessionState()
    # Test 1: Initial state
    print(f"✓ Initial state - transcript_loaded: {session_state.transcript_loaded}")
    print(f"✓ Initial state - current_video_url: {session_state.current_video_url}")
    print(f"✓ Initial state - chat_history: {len(session_state.chat_history)} entries")
    # Test 2: Simulate loading a video
    video_url_1 = "https://www.youtube.com/watch?v=9bZkp7q19f0"
    mock_transcript = "This is a mock transcript for testing purposes. It contains multiple words."
    # Simulate the logic from the app: only load when the URL changed or
    # nothing is loaded yet.
    if video_url_1 != session_state.current_video_url or not session_state.transcript_loaded:
        print(f"\n🔍 Loading new video: {video_url_1}")
        # Clear previous data if exists (simulate new video)
        if session_state.transcript_loaded:
            session_state.chat_history = []
            print(" ✓ Cleared previous chat history")
        # Store new video data
        session_state.current_video_url = video_url_1
        session_state.transcript_loaded = True
        session_state.transcript_text = mock_transcript
        session_state.word_count = len(mock_transcript.split())
        print(f" ✅ Video loaded successfully")
        print(f" 📊 Transcript: {session_state.word_count} words")
    # Test 3: Simulate multiple questions without reloading
    questions = [
        "What is this video about?",
        "Can you summarize the main points?",
        "What are the key takeaways?"
    ]
    print(f"\n💬 Testing multiple questions...")
    for i, question in enumerate(questions, 1):
        # Simulate chat without reloading transcript
        mock_answer = f"Mock answer {i} for: {question[:30]}..."
        session_state.chat_history.append((question, mock_answer))
        print(f" Q{i}: {question}")
        print(f" A{i}: {mock_answer}")
    print(f"\n✓ Chat history now has {len(session_state.chat_history)} entries")
    # Test 4: Test loading a different video (should clear history)
    video_url_2 = "https://www.youtube.com/watch?v=UF8uR6Z6KLc"
    print(f"\n🔄 Loading different video: {video_url_2}")
    if video_url_2 != session_state.current_video_url:
        # Clear previous data
        session_state.chat_history = []
        session_state.current_video_url = video_url_2
        session_state.transcript_text = "New mock transcript for the second video."
        session_state.word_count = len(session_state.transcript_text.split())
        print(f" ✅ New video loaded")
        print(f" 🗑️ Chat history cleared: {len(session_state.chat_history)} entries")
        print(f" 📊 New transcript: {session_state.word_count} words")
    # Test 5: Verify no duplicate loading for same URL
    print(f"\n🔄 Testing same video URL again: {video_url_2}")
    original_word_count = session_state.word_count
    if video_url_2 != session_state.current_video_url or not session_state.transcript_loaded:
        print(" ❌ Should NOT reload transcript for same URL")
    else:
        print(" ✅ Correctly skipped reloading for same URL")
        print(f" 📊 Word count unchanged: {original_word_count}")
    print(f"\n🎉 Session State Logic Test Complete!")
    print(f"✅ Final state:")
    print(f" - Current video: {session_state.current_video_url}")
    print(f" - Transcript loaded: {session_state.transcript_loaded}")
    print(f" - Word count: {session_state.word_count}")
    print(f" - Chat history: {len(session_state.chat_history)} entries")
# Run the walkthrough and print the checklist of behaviors it exercised.
if __name__ == "__main__":
    test_session_state_logic()
    print(f"\n💡 Key Improvements Verified:")
    print(f" 1. ✅ Transcript loads only once per video URL")
    print(f" 2. ✅ Chat history is preserved for the same video")
    print(f" 3. ✅ Chat history is cleared when loading a new video")
    print(f" 4. ✅ No redundant API calls for the same URL")
    print(f" 5. ✅ Session state properly manages video data")
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_llm_apps/chat_with_X_tutorials/chat_with_youtube_videos/test_session_state.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_seo_audit_team/agent.py | """
On-Page SEO Audit & Optimization Team built with Google ADK.
The workflow runs three specialized agents in sequence:
1. Page Auditor → scrapes the target URL with Firecrawl and extracts the structural audit + keyword focus.
2. SERP Analyst → performs competitive analysis with Google Search using the discovered primary keyword.
3. Optimization Advisor → synthesizes the audit and SERP insights into a prioritized optimization report.
"""
from __future__ import annotations
import os
from typing import List, Optional
from pydantic import BaseModel, Field
from google.adk.agents import LlmAgent, SequentialAgent
from google.adk.tools import google_search
from google.adk.tools.agent_tool import AgentTool
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset, StdioServerParameters
# =============================================================================
# Output Schemas
# =============================================================================
class HeadingItem(BaseModel):
    """A single heading extracted from the audited page."""
    tag: str = Field(..., description="Heading tag such as h1, h2, h3.")
    text: str = Field(..., description="Text content of the heading.")
class LinkCounts(BaseModel):
    """Link-profile snapshot of the audited page; all counts are optional."""
    internal: Optional[int] = Field(None, description="Number of internal links on the page.")
    external: Optional[int] = Field(None, description="Number of external links on the page.")
    broken: Optional[int] = Field(None, description="Number of broken links detected.")
    notes: Optional[str] = Field(
        None, description="Additional qualitative observations about linking."
    )
class AuditResults(BaseModel):
    """Structural on-page findings produced by the PageAuditorAgent."""
    title_tag: str = Field(..., description="Full title tag text.")
    meta_description: str = Field(..., description="Meta description text.")
    primary_heading: str = Field(..., description="Primary H1 heading on the page.")
    secondary_headings: List[HeadingItem] = Field(
        default_factory=list, description="Secondary headings (H2-H4) in reading order."
    )
    word_count: Optional[int] = Field(
        None, description="Approximate number of words in the main content."
    )
    content_summary: str = Field(
        ..., description="Summary of the main topics and structure of the content."
    )
    link_counts: LinkCounts = Field(
        ...,
        description="Quantitative snapshot of internal/external/broken links.",
    )
    technical_findings: List[str] = Field(
        default_factory=list,
        description="List of notable technical SEO issues (e.g., missing alt text, slow LCP).",
    )
    content_opportunities: List[str] = Field(
        default_factory=list,
        description="Observed content gaps or opportunities for improvement.",
    )
class TargetKeywords(BaseModel):
    """Keyword focus inferred from the page content by the auditor."""
    primary_keyword: str = Field(..., description="Most likely primary keyword target.")
    secondary_keywords: List[str] = Field(
        default_factory=list, description="Related secondary or supporting keywords."
    )
    search_intent: str = Field(
        ...,
        description="Dominant search intent inferred from the page (informational, transactional, etc.).",
    )
    supporting_topics: List[str] = Field(
        default_factory=list,
        description="Cluster of supporting topics or entities that reinforce the keyword strategy.",
    )
class PageAuditOutput(BaseModel):
    """Top-level output schema for PageAuditorAgent (stored under state['page_audit'])."""
    audit_results: AuditResults = Field(..., description="Structured on-page audit findings.")
    target_keywords: TargetKeywords = Field(
        ..., description="Keyword focus derived from page content."
    )
class SerpResult(BaseModel):
    """One organic search result in the competitive SERP analysis."""
    rank: int = Field(..., description="Organic ranking position.")
    title: str = Field(..., description="Title of the search result.")
    url: str = Field(..., description="Landing page URL.")
    snippet: str = Field(..., description="SERP snippet or summary.")
    content_type: str = Field(
        ..., description="Content format (blog post, landing page, tool, video, etc.)."
    )
class SerpAnalysis(BaseModel):
    """Output schema for SerpAnalystAgent (stored under state['serp_analysis'])."""
    primary_keyword: str = Field(..., description="Keyword used for SERP research.")
    top_10_results: List[SerpResult] = Field(
        ..., description="Top organic competitors for the keyword."
    )
    title_patterns: List[str] = Field(
        default_factory=list,
        description="Common patterns or phrases used in competitor titles.",
    )
    content_formats: List[str] = Field(
        default_factory=list,
        description="Typical content formats found (guides, listicles, comparison pages, etc.).",
    )
    people_also_ask: List[str] = Field(
        default_factory=list,
        description="Representative questions surfaced in People Also Ask.",
    )
    key_themes: List[str] = Field(
        default_factory=list,
        description="Notable recurring themes, features, or angles competitors emphasize.",
    )
    differentiation_opportunities: List[str] = Field(
        default_factory=list,
        description="Opportunities to stand out versus competitors.",
    )
class OptimizationRecommendation(BaseModel):
    """A single prioritized recommendation.

    NOTE(review): not referenced by any agent in this module's visible code —
    the advisor returns free-form Markdown; confirm whether this schema is
    still needed.
    """
    priority: str = Field(..., description="Priority level (P0, P1, P2).")
    area: str = Field(..., description="Optimization focus area (content, technical, UX, etc.).")
    recommendation: str = Field(..., description="Recommended action.")
    rationale: str = Field(..., description="Why this change matters, referencing audit/SERP data.")
    expected_impact: str = Field(..., description="Anticipated impact on SEO or user metrics.")
    effort: str = Field(..., description="Relative effort required (low/medium/high).")
# =============================================================================
# Tools
# =============================================================================
# Firecrawl MCP Toolset - connects to Firecrawl's MCP server for web scraping.
# Launches the server via npx on demand; requires FIRECRAWL_API_KEY in the
# environment (falls back to an empty string, which will fail at call time).
firecrawl_toolset = MCPToolset(
    connection_params=StdioServerParameters(
        command='npx',
        args=[
            "-y",  # Auto-confirm npm package installation
            "firecrawl-mcp",  # The Firecrawl MCP server package
        ],
        env={
            "FIRECRAWL_API_KEY": os.getenv("FIRECRAWL_API_KEY", "")
        }
    ),
    # Filter to use only the scrape tool for this agent
    tool_filter=['firecrawl_scrape']
)
# =============================================================================
# Helper Agents
# =============================================================================
# Thin helper agent around the built-in google_search tool; wrapped in an
# AgentTool below so other agents can invoke search as a callable tool.
search_executor_agent = LlmAgent(
    name="perform_google_search",
    model="gemini-2.5-flash",
    description="Executes Google searches for provided queries and returns structured results.",
    instruction="""The latest user message contains the keyword to search.
- Call google_search with that exact query and fetch the top organic results (aim for 10).
- Respond with JSON text containing the query and an array of result objects (title, url, snippet). Use an empty array when nothing is returned.
- No additional commentary—return JSON text only.""",
    tools=[google_search],
)
# Expose the search agent as a tool usable by the SERP analyst.
google_search_tool = AgentTool(search_executor_agent)
# =============================================================================
# Agent Definitions
# =============================================================================
# Agent 1/3: scrapes the target URL via Firecrawl and emits a structured
# PageAuditOutput that downstream agents read from state['page_audit'].
page_auditor_agent = LlmAgent(
    name="PageAuditorAgent",
    model="gemini-2.5-flash",
    description=(
        "Scrapes the target URL, performs a structural on-page SEO audit, and extracts keyword signals."
    ),
    instruction="""You are Agent 1 in a sequential SEO workflow. Your role is to gather data silently for the next agents.
STEP 1: Extract the URL
- Look for a URL in the user's message (it will start with http:// or https://)
- Example: If user says "Audit https://theunwindai.com", extract "https://theunwindai.com"
STEP 2: Call firecrawl_scrape
- Call `firecrawl_scrape` with these exact parameters:
url: <the URL you extracted>
formats: ["markdown", "html", "links"]
onlyMainContent: true
timeout: 90000
- Note: timeout is 90 seconds (90000ms)
STEP 3: Analyze the scraped data
- Parse the markdown content to find title tag, meta description, H1, H2-H4 headings
- Count words in the main content
- Count internal and external links
- Identify technical SEO issues
- Identify content opportunities
STEP 4: Infer keywords
- Based on the page content, determine the primary keyword (1-3 words)
- Identify 2-5 secondary keywords
- Determine search intent (informational, transactional, navigational, commercial)
- List 3-5 supporting topics
STEP 5: Return JSON
- Populate EVERY field in the PageAuditOutput schema with actual data
- Use "Not available" only if truly missing from scraped data
- Return ONLY valid JSON, no extra text before or after""",
    tools=[firecrawl_toolset],
    output_schema=PageAuditOutput,  # response must validate against this schema
    output_key="page_audit",        # result stored in session state under this key
)
# Agent 2/3: searches Google for the primary keyword found by the auditor and
# writes a SerpAnalysis into state['serp_analysis'] for the advisor.
serp_analyst_agent = LlmAgent(
    name="SerpAnalystAgent",
    model="gemini-2.5-flash",
    description=(
        "Researches the live SERP for the discovered primary keyword and summarizes the competitive landscape."
    ),
    instruction="""You are Agent 2 in the workflow. Your role is to silently gather SERP data for the final report agent.
STEP 1: Get the primary keyword
- Read `state['page_audit']['target_keywords']['primary_keyword']`
- Example: if it's "AI tools", you'll use that for search
STEP 2: Call perform_google_search
- IMPORTANT: You MUST call the `perform_google_search` tool
- Pass the primary keyword as the request parameter
- Example: if primary_keyword is "AI tools", call perform_google_search with request="AI tools"
STEP 3: Parse search results
- You should receive 10+ search results with title, url, snippet
- For each result (up to 10):
* Assign rank (1-10)
* Extract title
* Extract URL
* Extract snippet
* Infer content_type (blog post, landing page, tool, directory, video, etc.)
STEP 4: Analyze patterns
- title_patterns: Common words/phrases in titles (e.g., "Best", "Top 10", "Free", year)
- content_formats: Types you see (guides, listicles, comparison pages, tool directories)
- people_also_ask: Related questions (infer from snippets if not explicit)
- key_themes: Recurring topics across results
- differentiation_opportunities: Gaps or unique angles not covered by competitors
STEP 5: Return JSON
- Populate ALL fields in SerpAnalysis schema
- top_10_results MUST have 10 items (or as many as you found)
- DO NOT return empty arrays unless search truly failed
- Return ONLY valid JSON, no extra text""",
    tools=[google_search_tool],
    output_schema=SerpAnalysis,     # response must validate against this schema
    output_key="serp_analysis",     # result stored in session state under this key
)
# Agent 3/3: reads state['page_audit'] and state['serp_analysis'] and produces
# the final user-facing Markdown report. Deliberately has no tools and no
# output_schema — the raw Markdown response is the deliverable.
# Fix: the last instruction bullet previously left its parenthesis unclosed
# ('(e.g., "Current title is X characters, recommend Y"'), which reads as a
# truncated example to the model; the closing ')' is now present.
optimization_advisor_agent = LlmAgent(
    name="OptimizationAdvisorAgent",
    model="gemini-2.5-flash",
    description="Synthesizes the audit and SERP findings into a prioritized optimization roadmap.",
    instruction="""You are Agent 3 and the final expert in the workflow. You create the user-facing report.
STEP 1: Review the data
- Read `state['page_audit']` for:
* Title tag, meta description, H1
* Word count, headings structure
* Link counts
* Technical findings
* Content opportunities
* Primary and secondary keywords
- Read `state['serp_analysis']` for:
* Top 10 competitors
* Title patterns
* Content formats
* Key themes
* Differentiation opportunities
STEP 2: Create the report
Start with "# SEO Audit Report" and include these sections:
1. **Executive Summary** (2-3 paragraphs)
- Page being audited
- Primary keyword focus
- Key strengths and weaknesses
2. **Technical & On-Page Findings**
- Current title tag and suggestions
- Current meta description and suggestions
- H1 and heading structure analysis
- Word count and content depth
- Link profile (internal/external counts)
- Technical issues found
3. **Keyword Analysis**
- Primary keyword: [from state]
- Secondary keywords: [list from state]
- Search intent: [from state]
- Supporting topics: [list from state]
4. **Competitive SERP Analysis**
- What top competitors are doing
- Common title patterns
- Dominant content formats
- Key themes in top results
- Content gaps/opportunities
5. **Prioritized Recommendations**
Group by P0/P1/P2 with:
- Specific action
- Rationale (cite data)
- Expected impact
- Effort level
6. **Next Steps**
- Measurement plan
- Timeline suggestions
STEP 3: Output
- Return ONLY Markdown
- NO JSON
- NO preamble text
- Start directly with "# SEO Audit Report"
- Be specific with data points (e.g., "Current title is X characters, recommend Y")
""",
)
# Fixed-order pipeline: auditor → SERP analyst → advisor. Each stage passes
# its output to the next via the shared session state keys set above.
seo_audit_team = SequentialAgent(
    name="SeoAuditTeam",
    description=(
        "Runs a three-agent sequential pipeline that audits a page, researches SERP competitors, "
        "and produces an optimization plan."
    ),
    sub_agents=[
        page_auditor_agent,
        serp_analyst_agent,
        optimization_advisor_agent,
    ],
)
# Expose the root agent for the ADK runtime and Dev UI.
root_agent = seo_audit_team
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_seo_audit_team/agent.py",
"license": "Apache License 2.0",
"lines": 293,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/multimodal_uiux_feedback_agent_team/agent.py | from google.adk.agents import LlmAgent, SequentialAgent
from google.adk.tools import google_search
from google.adk.tools.agent_tool import AgentTool
from .tools import (
edit_landing_page_image,
generate_improved_landing_page,
)
# ============================================================================
# Helper Tool Agent (wraps google_search)
# ============================================================================
# Not routed to directly: UICritic and DesignStrategist below invoke this agent
# as a tool (via AgentTool) whenever they need live web research.
search_agent = LlmAgent(
    name="SearchAgent",
    model="gemini-2.5-flash",
    description="Searches for UI/UX best practices, design trends, and accessibility guidelines",
    instruction="Use google_search to find current UI/UX trends, design principles, WCAG guidelines, and industry best practices. Be concise and cite authoritative sources.",
    tools=[google_search],  # built-in ADK web-search tool
)
# ============================================================================
# Specialist Agent 1: Info Agent (for general inquiries)
# ============================================================================
info_agent = LlmAgent(
name="InfoAgent",
model="gemini-2.5-flash",
description="Handles general questions and provides system information about the UI/UX feedback team",
instruction="""
You are the Info Agent for the AI UI/UX Feedback Team.
WHEN TO USE: The coordinator routes general questions and casual greetings to you.
YOUR RESPONSE:
- Keep it brief and helpful (2-4 sentences)
- Explain the system analyzes landing pages using AI vision
- Mention capabilities: image analysis, constructive feedback, automatic improvements, comprehensive reports
- Ask them to upload a landing page screenshot for analysis
EXAMPLE:
"Hi! I'm part of the AI UI/UX Feedback Team. We analyze landing page designs using advanced AI vision, provide detailed constructive feedback on layout, typography, colors, and CTAs, then automatically generate improved versions with our recommendations applied. Upload a screenshot of your landing page and I'll get our expert team to review it!"
Be enthusiastic about design and helpful!
""",
)
# ============================================================================
# Specialist Agent 2: Design Editor (for iterative refinements)
# ============================================================================
design_editor = LlmAgent(
name="DesignEditor",
model="gemini-2.5-flash",
description="Edits existing landing page designs based on specific feedback or refinement requests",
instruction="""
You refine existing landing page designs based on user feedback.
**TASK**: User wants to modify an existing design (e.g., "make the CTA button larger", "use a different color scheme", "improve the hero section").
**CRITICAL**: Find the most recent design filename from conversation history!
Look for: "Saved as artifact: [filename]" or "landing_page_v1.png" type references.
Use **edit_landing_page_image** tool:
Parameters:
1. artifact_filename: The exact filename of the most recent design
2. prompt: Very specific edit instruction with UI/UX context
3. asset_name: Base name without _vX (e.g., "landing_page_improved")
**Example:**
User: "Make the CTA button more prominent"
Last design: "landing_page_improved_v1.png"
Call: edit_landing_page_image(
artifact_filename="landing_page_improved_v1.png",
prompt="Increase the CTA button size by 20%, use a high-contrast color (vibrant orange #FF6B35) to make it stand out more against the background. Add subtle shadow for depth. Ensure the button text is bold and clearly readable. Keep all other design elements unchanged.",
asset_name="landing_page_improved"
)
Be SPECIFIC in prompts and apply UI/UX best practices:
- Visual hierarchy (size, color, contrast)
- Whitespace and breathing room
- Typography hierarchy
- Color psychology
- Accessibility (WCAG)
After editing, briefly explain the UI/UX rationale for the changes.
""",
tools=[edit_landing_page_image],
)
# ============================================================================
# Specialist Agents 3-5: Full Analysis Pipeline (SequentialAgent)
# ============================================================================
ui_critic = LlmAgent(
name="UICritic",
model="gemini-2.5-flash",
description="Analyzes landing page design and provides comprehensive UI/UX feedback using visual AI",
instruction="""
You are a Senior UI/UX Designer with expertise in conversion optimization and accessibility.
**YOUR ROLE**: Analyze uploaded landing page images and provide expert, actionable feedback.
**IMPORTANT**: You can SEE and ANALYZE uploaded images directly using your vision capabilities.
The images are automatically visible to you in the conversation - no tools needed.
Focus on providing detailed analysis and specific recommendations.
## Analysis Framework
When you see a landing page image, examine it across these dimensions:
### 1. First Impression (1-10 rating)
- Visual appeal and professionalism
- Brand perception and trust signals
- Emotional impact
### 2. Layout & Visual Hierarchy ⭐ HIGH PRIORITY
- Hero section effectiveness (headline, subheadline, imagery)
- F-pattern or Z-pattern adherence
- Element sizing and positioning
- Above-the-fold content quality
- Alignment and grid usage
- Section spacing and flow
### 3. Typography
- Font choices (modern, professional, readable?)
- Heading hierarchy (H1, H2, H3 distinction)
- Body text readability (size 16px+, line height 1.5+, line length)
- Font pairing harmony
- Text contrast with background
### 4. Color Scheme & Contrast
- Brand color consistency
- Color psychology alignment with purpose
- Sufficient contrast for readability (WCAG AA: 4.5:1 for text)
- Color harmony (complementary, analogous, triadic?)
- Emotional response appropriateness
### 5. Call-to-Action (CTA) ⭐ CRITICAL
- CTA visibility and prominence (size, color, placement)
- Action-oriented copy ("Get Started" vs "Submit")
- Button design (contrast, hover states implied)
- Multiple CTAs coordination (primary vs secondary)
- Above-the-fold CTA presence
### 6. Whitespace & Balance
- Adequate breathing room around elements
- Cluttered vs clean sections
- Visual weight distribution
- Margins and padding consistency
### 7. Content Structure
- Information architecture clarity
- Content scanability
- Social proof placement (testimonials, logos, stats)
- Trust elements (security badges, guarantees)
### 8. Mobile Responsiveness Considerations
- Elements that may not translate well to mobile
- Touch target sizes
- Mobile-first design principles
## Output Structure
Provide feedback in this format:
**🎯 OVERALL IMPRESSION**
[Rating and 2-3 sentence summary]
**✅ WHAT WORKS WELL**
[List 3-5 strengths]
**⚠️ CRITICAL ISSUES** (High Priority)
1. [Issue with severity and specific location]
2. [Issue with severity and specific location]
3. [Issue with severity and specific location]
**📋 ADDITIONAL IMPROVEMENTS** (Medium/Low Priority)
[4-6 additional suggestions]
**🚀 TOP 3 IMPACT PRIORITIES**
1. [Most impactful change]
2. [Second most impactful change]
3. [Third most impactful change]
**📊 DETAILED SCORES**
- Layout & Hierarchy: X/10
- Typography: X/10
- Color & Contrast: X/10
- CTA Effectiveness: X/10
- Whitespace & Balance: X/10
**IMPORTANT**: At the end of your analysis, output a structured summary:
```
ANALYSIS COMPLETE
Images Analyzed: [Yes/No - describe what you see]
Key Issues Identified: [number]
Critical Priority: [main issue]
Target Audience: [detected or general]
```
Be DETAILED and SPECIFIC in your analysis - this drives the quality of the improvement plan and generated design.
**IF NO IMAGE IS VISIBLE**: Ask the user to upload a landing page screenshot so you can provide analysis.
""",
tools=[AgentTool(search_agent)],
)
design_strategist = LlmAgent(
name="DesignStrategist",
model="gemini-2.5-flash",
description="Creates detailed improvement plan based on UI/UX analysis",
instruction="""
Read from state: latest_analysis, key issues, priorities
You are a Design Strategist who creates actionable improvement plans.
**YOUR TASK**: Based on the UI Critic's analysis, create a SPECIFIC, DETAILED plan for improvements.
## Improvement Plan Structure
### 🎯 Design Strategy Overview
- Primary goal: [conversion optimization/brand awareness/user engagement]
- Target user: [persona]
- Key improvement theme: [modernization/simplification/boldness/etc.]
### 📐 Layout & Structure Improvements
**Changes to make:**
- Hero section: [specific modifications to headline, subheadline, imagery, CTA]
- Visual hierarchy: [size adjustments, reordering, emphasis changes]
- Grid system: [alignment fixes, column structure]
- Whitespace: [specific areas to add/reduce space]
### 🎨 Visual Design Improvements
**Color Palette:**
- Primary: [specific color with hex code and usage]
- Secondary: [specific color with hex code and usage]
- Accent (CTA): [high-contrast color with hex code]
- Background: [specific shade]
- Text colors: [with contrast ratios]
**Typography:**
- Heading font: [font name, size, weight]
- Body font: [font name, size, line height]
- CTA text: [font treatment]
- Hierarchy: [H1: Xpx, H2: Xpx, Body: 16-18px]
### 🎯 CTA Optimization
- Primary CTA: [exact text, color, size, placement]
- Secondary CTA: [if applicable]
- Button design: [shape, padding, shadow, hover effect]
### ♿ Accessibility Enhancements
- Contrast improvements needed: [specific areas]
- Font size increases: [where]
- Alt text considerations
- Focus states for interactive elements
### 📱 Mobile Considerations
- Elements to stack vertically
- Font size adjustments for mobile
- Touch target sizes (minimum 44x44px)
### 🔤 Content Recommendations
- Headline improvements: [more compelling/clearer]
- Subheadline clarity
- CTA copy: [action-oriented language]
- Trust signals to add/improve
**IMPORTANT: At the end, provide:**
```
DESIGN PLAN COMPLETE
Improvement Categories: [Layout, Color, Typography, CTA, Accessibility]
Estimated Impact: [High/Medium/Low]
Implementation Complexity: [Simple/Moderate/Complex]
Ready for visual implementation.
```
Be ULTRA-SPECIFIC with colors (hex codes), sizes (px), and placements. This drives the image generation quality.
""",
tools=[AgentTool(search_agent)],
)
visual_implementer = LlmAgent(
name="VisualImplementer",
model="gemini-2.5-flash",
description="Generates improved landing page design and creates comprehensive report",
instruction="""
Read conversation history to extract:
- UI Critic's detailed analysis
- Design Strategist's improvement plan
- Original landing page image (if visible in conversation)
**IMPORTANT**: You have VISION CAPABILITIES and can see images in the conversation.
If there's an original landing page image visible, use it as inspiration for the improved version.
**YOUR TASK**: Generate an improved landing page implementing ALL recommendations
Use **generate_improved_landing_page** tool with an EXTREMELY DETAILED prompt.
**Build the prompt by incorporating:**
From UI Critic:
- Critical issues to fix
- Top 3 priorities
- What currently works well (preserve these)
From Design Strategist:
- Exact color palette (with hex codes)
- Typography specifications (fonts, sizes, weights)
- Layout structure and hierarchy
- CTA design details
- Whitespace improvements
**Prompt Structure:**
"Professional landing page design with modern UI/UX best practices applied.
**Layout & Hierarchy:**
[Detailed description of hero section, content structure, visual flow]
**Color Palette:**
- Primary: [color name + hex code]
- Secondary: [color name + hex code]
- CTA/Accent: [high-contrast color + hex code]
- Background: [color + hex code]
- Text: [color with contrast ratio]
**Typography:**
- Headlines: [font, size, weight, color] - Clear hierarchy with [X]px for H1
- Body text: [font, 16-18px, line-height 1.6, color] - Highly readable
- CTA text: [font, size, weight] - Action-oriented
**Call-to-Action:**
[Detailed CTA button design: size, color, text, placement, shadow/effects]
**Visual Elements:**
- Hero image/graphic: [description]
- Section images: [description]
- Icons: [style and placement]
- Social proof: [testimonials, logos, stats placement]
**Whitespace & Balance:**
[Specific spacing between sections, margins, padding]
**Accessibility:**
- WCAG AA compliant contrast ratios
- Readable font sizes (16px minimum)
- Clear focus states
**Style:**
- Modern, clean, professional
- [Additional style keywords from analysis]
- High-quality UI design, Dribbble/Behance quality
Camera/Quality: Desktop web design screenshot, 16:9 aspect ratio, professional UI/UX portfolio quality"
Parameters:
- prompt: [your ultra-detailed prompt above]
- aspect_ratio: "16:9"
- asset_name: "landing_page_improved"
- reference_image: [filename of original if available]
**After generating the improved design, provide a brief summary:**
Describe the key improvements in 3-4 sentences:
- What critical issues were addressed
- Main visual/design changes applied
- Expected impact on user experience and conversion
**Example:**
"✅ **Improved Landing Page Generated!**
**Key Improvements Applied:**
- ✨ Enhanced visual hierarchy with larger hero headline (48px) and prominent CTA
- 🎨 Implemented high-contrast color scheme (#FF6B35 accent) with WCAG AA compliance
- 📝 Improved typography with clear heading hierarchy and 18px readable body text
- 🎯 Redesigned CTA button with vibrant accent color and better placement above-the-fold
- 💨 Optimized whitespace for better content flow and readability
The new design addresses all critical issues identified in the analysis and follows modern UI/UX best practices."
""",
tools=[generate_improved_landing_page],
)
# Deterministic three-stage review pipeline; the coordinator transfers here
# whenever a screenshot needs a full review. Stages always execute in order:
# critique, then strategy, then visual implementation.
analysis_pipeline = SequentialAgent(
    name="AnalysisPipeline",
    description="Full UI/UX analysis pipeline: Image Analysis → Design Strategy → Visual Implementation",
    sub_agents=[ui_critic, design_strategist, visual_implementer],
)
# ============================================================================
# Coordinator/Dispatcher (Root Agent)
# ============================================================================
root_agent = LlmAgent(
name="UIUXFeedbackTeam",
model="gemini-2.5-flash",
description="Intelligent coordinator that routes UI/UX feedback requests to the appropriate specialist or analysis pipeline. Supports landing page image analysis!",
instruction="""
You are the Coordinator for the AI UI/UX Feedback Team.
YOUR ROLE: Analyze the user's request and route it to the right specialist using transfer_to_agent.
**IMPORTANT**: You have VISION CAPABILITIES. If you see an image in the conversation, route to AnalysisPipeline immediately.
ROUTING LOGIC:
1. **For general questions/greetings** (NO images present):
→ transfer_to_agent to "InfoAgent"
→ Examples: "hi", "what can you do?", "how does this work?", "what is UI/UX?"
2. **For editing EXISTING designs** (only if a design was already generated):
→ transfer_to_agent to "DesignEditor"
→ Examples: "make the CTA bigger", "change the color scheme", "improve the hero section", "make it more modern"
→ User wants to MODIFY an existing improved design
→ Check: Was an improved design generated earlier in this conversation?
3. **For NEW landing page analysis** (PRIORITY ROUTE):
→ transfer_to_agent to "AnalysisPipeline"
→ Examples: "analyze this landing page", "review my design", "give me feedback"
→ **CRITICAL**: If you SEE an image in the conversation → ALWAYS route here!
→ First-time analysis or new project
→ This runs the full pipeline: UI Critic → Design Strategist → Visual Implementer
CRITICAL: You MUST use transfer_to_agent - don't answer directly!
Decision flow:
- **Image visible in conversation** → IMMEDIATELY transfer to AnalysisPipeline
- Design exists + wants changes → DesignEditor
- No image + asking questions → InfoAgent
Be a smart router - prioritize image analysis!
""",
sub_agents=[
info_agent,
design_editor,
analysis_pipeline,
],
)
__all__ = ["root_agent"]
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/multimodal_uiux_feedback_agent_team/agent.py",
"license": "Apache License 2.0",
"lines": 354,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/multimodal_uiux_feedback_agent_team/tools.py | import os
import logging
from google import genai
from google.genai import types
from google.adk.tools import ToolContext
from pydantic import BaseModel, Field
from dotenv import load_dotenv
# Pull GEMINI_API_KEY / GOOGLE_API_KEY (and friends) from a local .env file
# before any tool constructs a genai.Client().
load_dotenv()
# Configure logging
logger = logging.getLogger(__name__)  # module-level logger, stdlib convention
# ============================================================================
# Helper Functions for Asset Version Management
# ============================================================================
def get_next_version_number(tool_context: ToolContext, asset_name: str) -> int:
    """Return the version number the next save of *asset_name* should use.

    Versions start at 1: an asset with no entry in the session's
    ``asset_versions`` map yields 1.
    """
    recorded = tool_context.state.get("asset_versions", {})
    return recorded.get(asset_name, 0) + 1
def update_asset_version(tool_context: ToolContext, asset_name: str, version: int, filename: str) -> None:
    """Record *filename* as the current *version* of *asset_name* in session state."""
    state = tool_context.state
    # Lazily create both tracking maps on first use.
    for key in ("asset_versions", "asset_filenames"):
        if key not in state:
            state[key] = {}
    state["asset_versions"][asset_name] = version
    state["asset_filenames"][asset_name] = filename
def create_versioned_filename(asset_name: str, version: int, file_extension: str = "png") -> str:
    """Build the canonical ``<asset>_v<version>.<extension>`` artifact filename."""
    return "{}_v{}.{}".format(asset_name, version, file_extension)
async def load_landing_page_image(tool_context: ToolContext, filename: str):
    """Load a landing page image artifact by filename.

    Returns the loaded artifact part, or None when the artifact does not exist
    or loading fails. Errors are logged rather than raised so callers can
    degrade gracefully (see generate_improved_landing_page's reference flow).
    """
    try:
        loaded_part = await tool_context.load_artifact(filename)
        if loaded_part:
            # Fix: these log lines previously printed a literal "(unknown)"
            # instead of interpolating the filename being loaded.
            logger.info(f"Successfully loaded landing page image: {filename}")
            return loaded_part
        else:
            logger.warning(f"Landing page image not found: {filename}")
            return None
    except Exception as e:
        logger.error(f"Error loading landing page image {filename}: {e}")
        return None
# ============================================================================
# Pydantic Input Models
# ============================================================================
class EditLandingPageInput(BaseModel):
    # Input schema for edit_landing_page_image (re-validated inside the tool).
    artifact_filename: str = Field(..., description="The filename of the landing page artifact to edit.")
    prompt: str = Field(..., description="Detailed description of UI/UX improvements to apply.")
    # NOTE(review): annotated str but defaults to None — consider Optional[str]
    # so an explicitly passed None also validates; confirm intended contract.
    asset_name: str = Field(default=None, description="Optional: specify asset name for the new version.")
class GenerateImprovedLandingPageInput(BaseModel):
    # Input schema for generate_improved_landing_page (re-validated inside the tool).
    prompt: str = Field(..., description="A detailed description of the improved landing page based on feedback.")
    aspect_ratio: str = Field(default="16:9", description="The desired aspect ratio. Default is 16:9.")
    asset_name: str = Field(default="landing_page_improved", description="Base name for the improved design.")
    # NOTE(review): annotated str but defaults to None — consider Optional[str];
    # confirm whether callers may pass an explicit None.
    reference_image: str = Field(default=None, description="Optional: filename of the original landing page to use as reference.")
# ============================================================================
# NOTE: Image Analysis is handled directly by agents with vision capabilities
# Agents with gemini-2.5-flash can see and analyze uploaded images automatically
# No separate image analysis tool is needed
# ============================================================================
# ============================================================================
# Image Editing Tool
# ============================================================================
async def edit_landing_page_image(tool_context: ToolContext, inputs: EditLandingPageInput) -> str:
    """
    Edits a landing page image by applying UI/UX improvements.
    This tool uses Gemini 2.5 Flash's image generation capabilities to create
    an improved version of the landing page based on feedback.

    Returns a human-readable status string in every path (success, artifact
    not found, save failure, or no image produced) — it never raises except
    for the missing-credentials check below.
    """
    # Fail fast with a clear message when no Gemini credentials are configured.
    if "GEMINI_API_KEY" not in os.environ and "GOOGLE_API_KEY" not in os.environ:
        raise ValueError("GEMINI_API_KEY or GOOGLE_API_KEY environment variable not set.")
    logger.info("Starting landing page image editing")
    try:
        client = genai.Client()
        # Despite the annotation, inputs arrives as a raw mapping at runtime
        # (note the ** unpacking); re-validate it into the pydantic model.
        inputs = EditLandingPageInput(**inputs)
        # Load the existing landing page image
        logger.info(f"Loading artifact: {inputs.artifact_filename}")
        try:
            loaded_image_part = await tool_context.load_artifact(inputs.artifact_filename)
            if not loaded_image_part:
                return f"❌ Could not find landing page artifact: {inputs.artifact_filename}"
        except Exception as e:
            logger.error(f"Error loading artifact: {e}")
            return f"Error loading landing page artifact: {e}"
        model = "gemini-2.5-flash-image"
        # Build edit prompt with UI/UX best practices
        enhanced_prompt = f"""
        {inputs.prompt}
        **Apply these UI/UX best practices while editing:**
        - Maintain visual hierarchy (size, color, spacing)
        - Ensure sufficient whitespace for breathing room
        - Use consistent alignment and grid system
        - Make CTAs prominent with contrasting colors
        - Improve readability (font size, line height, contrast)
        - Follow modern web design principles
        - Keep the overall brand aesthetic
        Make the improvements look natural and professional.
        """
        # Build content parts: the source image plus the textual edit request.
        content_parts = [loaded_image_part, types.Part.from_text(text=enhanced_prompt)]
        contents = [
            types.Content(
                role="user",
                parts=content_parts,
            ),
        ]
        generate_content_config = types.GenerateContentConfig(
            response_modalities=[
                "IMAGE",
                "TEXT",
            ],
        )
        # Determine asset name and generate versioned filename.
        # Precedence: explicit input > session's current asset > base name
        # derived from the source artifact filename (strip the _vN suffix).
        if inputs.asset_name:
            asset_name = inputs.asset_name
        else:
            current_asset_name = tool_context.state.get("current_asset_name")
            if current_asset_name:
                asset_name = current_asset_name
            else:
                base_name = inputs.artifact_filename.split('_v')[0] if '_v' in inputs.artifact_filename else "landing_page"
                asset_name = base_name
        version = get_next_version_number(tool_context, asset_name)
        edited_artifact_filename = create_versioned_filename(asset_name, version)
        logger.info(f"Editing landing page with artifact filename: {edited_artifact_filename} (version {version})")
        # Edit the image: stream chunks and save the FIRST image part received.
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            # Skip chunks that carry no candidate content.
            if (
                chunk.candidates is None
                or chunk.candidates[0].content is None
                or chunk.candidates[0].content.parts is None
            ):
                continue
            if chunk.candidates[0].content.parts[0].inline_data and chunk.candidates[0].content.parts[0].inline_data.data:
                inline_data = chunk.candidates[0].content.parts[0].inline_data
                # Create a Part object from the inline data
                edited_image_part = types.Part(inline_data=inline_data)
                try:
                    # Save the edited image as an artifact. save_artifact returns
                    # the artifact-store version, replacing the local counter.
                    version = await tool_context.save_artifact(
                        filename=edited_artifact_filename,
                        artifact=edited_image_part
                    )
                    # Update version tracking
                    update_asset_version(tool_context, asset_name, version, edited_artifact_filename)
                    # Store in session state so DesignEditor follow-ups can find it.
                    tool_context.state["last_edited_landing_page"] = edited_artifact_filename
                    tool_context.state["current_asset_name"] = asset_name
                    logger.info(f"Saved edited landing page as artifact '{edited_artifact_filename}' (version {version})")
                    return f"✅ **Landing page edited successfully!**\n\nSaved as: **{edited_artifact_filename}** (version {version} of {asset_name})\n\nThe landing page has been improved with the UI/UX enhancements."
                except Exception as e:
                    logger.error(f"Error saving edited artifact: {e}")
                    return f"Error saving edited landing page as artifact: {e}"
            else:
                # Text-only chunk: log it and keep waiting for an image part.
                if hasattr(chunk, 'text') and chunk.text:
                    logger.info(f"Model response: {chunk.text}")
        # Reached only when the stream ends without yielding an image part.
        return "No edited landing page was generated. Please try again."
    except Exception as e:
        logger.error(f"Error in edit_landing_page_image: {e}")
        return f"An error occurred while editing the landing page: {e}"
# ============================================================================
# Generate Improved Landing Page Tool
# ============================================================================
async def generate_improved_landing_page(tool_context: ToolContext, inputs: GenerateImprovedLandingPageInput) -> str:
    """
    Generates an improved landing page based on the analysis and feedback.
    This tool creates a new landing page design incorporating all the recommended
    UI/UX improvements. Can work with or without a reference image.

    Two-step flow: (1) a text model rewrites the raw prompt into an enhanced
    generation prompt; (2) the image model streams the design, and the first
    image part received is saved as a versioned artifact. Always returns a
    human-readable status string.
    """
    # Fail fast when no Gemini credentials are configured.
    if "GEMINI_API_KEY" not in os.environ and "GOOGLE_API_KEY" not in os.environ:
        raise ValueError("GEMINI_API_KEY or GOOGLE_API_KEY environment variable not set.")
    logger.info("Starting improved landing page generation")
    try:
        client = genai.Client()
        # Despite the annotation, inputs arrives as a raw mapping at runtime
        # (note the ** unpacking); re-validate it into the pydantic model.
        inputs = GenerateImprovedLandingPageInput(**inputs)
        # Note: Reference images from the conversation are automatically available to agents
        # This parameter is kept for backwards compatibility with saved artifacts
        reference_part = None
        if inputs.reference_image:
            try:
                reference_part = await load_landing_page_image(tool_context, inputs.reference_image)
                if reference_part:
                    logger.info(f"Using reference image artifact: {inputs.reference_image}")
            except Exception as e:
                # Best-effort: a missing reference must not abort generation.
                logger.warning(f"Could not load reference image, proceeding without it: {e}")
        # Get the analysis from state to incorporate feedback
        latest_analysis = tool_context.state.get("latest_analysis", "")
        # Build enhanced prompt (analysis excerpt capped at 500 chars).
        enhancement_prompt = f"""
        Create a professional landing page design that incorporates these improvements:
        {inputs.prompt}
        **Previous Analysis Insights:**
        {latest_analysis[:500] if latest_analysis else "No previous analysis available"}
        **Design Requirements:**
        - Modern, clean aesthetic
        - Clear visual hierarchy
        - Prominent, well-designed CTAs
        - Proper whitespace and breathing room
        - Professional typography with clear hierarchy
        - Accessible color contrast (WCAG AA)
        - Mobile-first responsive considerations
        - Follow the latest UI/UX best practices
        - High-quality, photorealistic rendering
        Aspect ratio: {inputs.aspect_ratio}
        Create a professional UI/UX design that would be magazine-quality.
        """
        # Prepare content parts
        # NOTE(review): content_parts is built here but never passed to either
        # model call below (the second call rebuilds its own parts) — confirm
        # whether this was meant to feed the prompt-rewrite step.
        content_parts = [types.Part.from_text(text=enhancement_prompt)]
        if reference_part:
            content_parts.append(reference_part)
        # Generate enhanced prompt first (text-only rewrite pass).
        rewritten_prompt_response = client.models.generate_content(
            model="gemini-2.5-flash",
            contents=enhancement_prompt
        )
        rewritten_prompt = rewritten_prompt_response.text
        logger.info(f"Enhanced prompt: {rewritten_prompt}")
        model = "gemini-2.5-flash-image"
        contents = [
            types.Content(
                role="user",
                parts=[types.Part.from_text(text=rewritten_prompt)] + ([reference_part] if reference_part else []),
            ),
        ]
        generate_content_config = types.GenerateContentConfig(
            response_modalities=[
                "IMAGE",
                "TEXT",
            ],
        )
        # Generate versioned filename
        version = get_next_version_number(tool_context, inputs.asset_name)
        artifact_filename = create_versioned_filename(inputs.asset_name, version)
        logger.info(f"Generating improved landing page with filename: {artifact_filename} (version {version})")
        # Generate the image: stream chunks and save the first image part.
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            # Skip chunks that carry no candidate content.
            if (
                chunk.candidates is None
                or chunk.candidates[0].content is None
                or chunk.candidates[0].content.parts is None
            ):
                continue
            if chunk.candidates[0].content.parts[0].inline_data and chunk.candidates[0].content.parts[0].inline_data.data:
                inline_data = chunk.candidates[0].content.parts[0].inline_data
                image_part = types.Part(inline_data=inline_data)
                try:
                    # save_artifact returns the artifact-store version, which
                    # replaces the locally computed version number.
                    version = await tool_context.save_artifact(
                        filename=artifact_filename,
                        artifact=image_part
                    )
                    update_asset_version(tool_context, inputs.asset_name, version, artifact_filename)
                    # Remember the latest output so DesignEditor can iterate on it.
                    tool_context.state["last_generated_landing_page"] = artifact_filename
                    tool_context.state["current_asset_name"] = inputs.asset_name
                    logger.info(f"Saved improved landing page as artifact '{artifact_filename}' (version {version})")
                    return f"✅ **Improved landing page generated successfully!**\n\nSaved as: **{artifact_filename}** (version {version} of {inputs.asset_name})\n\nThis design incorporates all the recommended UI/UX improvements."
                except Exception as e:
                    logger.error(f"Error saving artifact: {e}")
                    return f"Error saving improved landing page as artifact: {e}"
            else:
                # Text-only chunk: log it and keep waiting for an image part.
                if hasattr(chunk, 'text') and chunk.text:
                    logger.info(f"Model response: {chunk.text}")
        # Reached only when the stream ends without yielding an image part.
        return "No improved landing page was generated. Please try again with a more detailed prompt."
    except Exception as e:
        logger.error(f"Error in generate_improved_landing_page: {e}")
        return f"An error occurred while generating the improved landing page: {e}"
# ============================================================================
# Note: No utility tools needed - agents handle everything directly
# ============================================================================
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/multimodal_uiux_feedback_agent_team/tools.py",
"license": "Apache License 2.0",
"lines": 276,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_home_renovation_agent/tools.py | import os
import logging
from google import genai
from google.genai import types
from google.adk.tools import ToolContext
from pydantic import BaseModel, Field
from dotenv import load_dotenv
# Pull GEMINI_API_KEY / GOOGLE_API_KEY from a local .env before any tool
# constructs a genai.Client().
load_dotenv()
# Configure logging
logger = logging.getLogger(__name__)  # module-level logger, stdlib convention
# ============================================================================
# Helper Functions for Asset Version Management
# ============================================================================
def get_next_version_number(tool_context: ToolContext, asset_name: str) -> int:
    """Compute the version to assign to the next rendering saved under *asset_name*.

    An asset never saved before (no ``asset_versions`` entry) gets version 1.
    """
    versions_by_asset = tool_context.state.get("asset_versions", {})
    latest = versions_by_asset.get(asset_name, 0)
    return latest + 1
def update_asset_version(tool_context: ToolContext, asset_name: str, version: int, filename: str) -> None:
    """Record *version*/*filename* as current for *asset_name* and append to its history."""
    state = tool_context.state
    # Lazily create the two current-state maps on first use.
    for key in ("asset_versions", "asset_filenames"):
        if key not in state:
            state[key] = {}
    state["asset_versions"][asset_name] = version
    state["asset_filenames"][asset_name] = filename
    # Every saved version is also appended to a per-asset history list, which
    # get_asset_versions_info() uses to report how many renderings exist.
    asset_history_key = f"{asset_name}_history"
    if asset_history_key not in state:
        state[asset_history_key] = []
    state[asset_history_key].append({"version": version, "filename": filename})
def create_versioned_filename(asset_name: str, version: int, file_extension: str = "png") -> str:
    """Return the ``<asset>_v<N>.<extension>`` filename used for saved artifacts."""
    pieces = (asset_name, "_v", str(version), ".", file_extension)
    return "".join(pieces)
def get_asset_versions_info(tool_context: ToolContext) -> str:
    """Get information about all asset versions in the session.

    Builds a human-readable summary from the session-state structures kept by
    update_asset_version(): ``asset_versions`` (name -> current version),
    ``asset_filenames`` (name -> latest filename) and the per-asset
    ``<name>_history`` lists (one entry per saved version).
    """
    asset_versions = tool_context.state.get("asset_versions", {})
    if not asset_versions:
        return "No renovation renderings have been created yet."
    info_lines = ["Current renovation renderings:"]
    for asset_name, current_version in asset_versions.items():
        history_key = f"{asset_name}_history"
        # History length gives the total count; missing history counts as 0.
        history = tool_context.state.get(history_key, [])
        total_versions = len(history)
        latest_filename = tool_context.state.get("asset_filenames", {}).get(asset_name, "Unknown")
        info_lines.append(f"  • {asset_name}: {total_versions} version(s), latest is v{current_version} ({latest_filename})")
    return "\n".join(info_lines)
def get_reference_images_info(tool_context: ToolContext) -> str:
    """Get information about all reference images (current room/inspiration) uploaded in the session.

    Reads the ``reference_images`` session-state map; each entry maps an
    artifact filename to metadata with ``version`` and ``type`` keys.
    Returns a bulleted, human-readable summary.
    """
    reference_images = tool_context.state.get("reference_images", {})
    if not reference_images:
        return "No reference images have been uploaded yet."
    info_lines = ["Available reference images (current room photos & inspiration):"]
    for filename, info in reference_images.items():
        version = info.get("version", "Unknown")
        image_type = info.get("type", "reference")
        # Fix: the bullet previously printed a literal "(unknown)" instead of
        # interpolating the artifact filename being described.
        info_lines.append(f"  • {filename} ({image_type} v{version})")
    return "\n".join(info_lines)
async def load_reference_image(tool_context: "ToolContext", filename: str):
    """Load a reference image artifact by filename.

    Args:
        tool_context: Tool context that owns the session's artifact store.
        filename: Artifact filename to load.

    Returns:
        The loaded artifact part, or None when the artifact is missing or
        loading raises.
    """
    try:
        loaded_part = await tool_context.load_artifact(filename)
        if loaded_part:
            # Fix: log the actual filename (the placeholder had been lost
            # from these f-strings, making the logs useless for debugging).
            logger.info(f"Successfully loaded reference image: {filename}")
            return loaded_part
        else:
            logger.warning(f"Reference image not found: {filename}")
            return None
    except Exception as e:
        logger.error(f"Error loading reference image {filename}: {e}")
        return None
def get_latest_reference_image_filename(tool_context: "ToolContext") -> "str | None":
    """Return the filename of the most recently uploaded reference image.

    Returns None when no reference image has been uploaded yet (the original
    ``-> str`` annotation was wrong: ``dict.get`` defaults to None here).
    """
    return tool_context.state.get("latest_reference_image")
# ============================================================================
# Pydantic Input Models
# ============================================================================
class GenerateRenovationRenderingInput(BaseModel):
    """Arguments accepted by the generate_renovation_rendering tool."""

    prompt: str = Field(..., description="A detailed description of the renovated space to generate. Include room type, style, colors, materials, fixtures, lighting, and layout.")
    aspect_ratio: str = Field(default="16:9", description="The desired aspect ratio, e.g., '1:1', '16:9', '9:16'. Default is 16:9 for room photos.")
    asset_name: str = Field(default="renovation_rendering", description="Base name for the rendering (will be versioned automatically). Use descriptive names like 'kitchen_modern_farmhouse' or 'bathroom_spa'.")
    # NOTE(review): the two optional fields below default to None despite the
    # plain `str` annotation — consider Optional[str] for accuracy.
    current_room_photo: str = Field(default=None, description="Optional: filename of the current room photo to use as reference for layout/structure.")
    inspiration_image: str = Field(default=None, description="Optional: filename of an inspiration image to guide the style. Use 'latest' for most recent upload.")
class EditRenovationRenderingInput(BaseModel):
    """Arguments accepted by the edit_renovation_rendering tool."""

    # NOTE(review): fields defaulting to None use a plain `str` annotation —
    # consider Optional[str] for accuracy.
    artifact_filename: str = Field(default=None, description="The filename of the rendering artifact to edit. If not provided, uses the last generated rendering.")
    prompt: str = Field(..., description="The prompt describing the desired changes (e.g., 'make cabinets darker', 'add pendant lights', 'change floor to hardwood').")
    asset_name: str = Field(default=None, description="Optional: specify asset name for the new version (defaults to incrementing current asset).")
    reference_image_filename: str = Field(default=None, description="Optional: filename of a reference image to guide the edit. Use 'latest' for most recent upload.")
# ============================================================================
# Image Generation Tool
# ============================================================================
async def generate_renovation_rendering(tool_context: ToolContext, inputs: GenerateRenovationRenderingInput) -> str:
    """
    Generates a photorealistic rendering of a renovated space based on the design plan.

    This tool uses Gemini 3 Pro's image generation capabilities to create visual
    representations of renovation plans. It can optionally use current room photos
    and inspiration images as references.

    Args:
        tool_context: Tool context providing session state and artifact storage.
        inputs: Generation parameters (prompt, aspect ratio, asset name, optional
            reference image filenames). May arrive as a plain dict from the model.

    Returns:
        A human-readable status message; on success it names the saved artifact
        and its version, otherwise it describes the failure.

    Raises:
        ValueError: If neither GEMINI_API_KEY nor GOOGLE_API_KEY is set.
    """
    if "GEMINI_API_KEY" not in os.environ and "GOOGLE_API_KEY" not in os.environ:
        raise ValueError("GEMINI_API_KEY or GOOGLE_API_KEY environment variable not set.")
    logger.info("Starting renovation rendering generation")
    try:
        client = genai.Client()
        # Handle inputs that might come as dict instead of Pydantic model
        # (LLM tool calls sometimes deliver raw JSON objects).
        if isinstance(inputs, dict):
            inputs = GenerateRenovationRenderingInput(**inputs)
        # Collect reference images (current room photo and/or inspiration) to
        # send alongside the text prompt; missing artifacts are simply skipped.
        reference_images = []
        if inputs.current_room_photo:
            current_photo_part = await load_reference_image(tool_context, inputs.current_room_photo)
            if current_photo_part:
                reference_images.append(current_photo_part)
                logger.info(f"Using current room photo: {inputs.current_room_photo}")
        if inputs.inspiration_image:
            # "latest" is a sentinel for the most recently uploaded reference image.
            if inputs.inspiration_image == "latest":
                insp_filename = get_latest_reference_image_filename(tool_context)
            else:
                insp_filename = inputs.inspiration_image
            if insp_filename:
                inspiration_part = await load_reference_image(tool_context, insp_filename)
                if inspiration_part:
                    reference_images.append(inspiration_part)
                    logger.info(f"Using inspiration image: {insp_filename}")
        # Build the enhanced prompt using SLC formula (Subject, Lighting, Camera)
        base_rewrite_prompt = f"""
        Create an ultra-detailed, photorealistic prompt for generating a professional interior design photograph.
        Original description: {inputs.prompt}
        **CRITICAL REQUIREMENT - PRESERVE EXACT LAYOUT:**
        The generated image MUST maintain the EXACT same room layout, structure, and spatial arrangement described in the prompt:
        - Keep all windows, doors, skylights in their exact positions
        - Keep all cabinets, counters, appliances in their exact positions
        - Keep the same room dimensions and proportions
        - Keep the same camera angle/perspective
        - ONLY change surface finishes: paint colors, cabinet colors, countertop materials, flooring, backsplash, hardware, and decorative elements
        - DO NOT move, add, or remove any structural elements or major fixtures
        **Use the SLC Formula for Photorealism:**
        1. **SUBJECT (S)** - Be highly specific about details and textures:
        - Describe exact materials with rich adjectives (e.g., "smooth matte white shaker-style cabinets", "honed Carrara marble countertops with subtle grey veining")
        - Include texture details (e.g., "brushed nickel hardware", "wide-plank oak flooring with natural grain")
        - Specify finishes precisely (e.g., "satin finish", "polished", "matte", "textured")
        2. **LIGHTING (L)** - Describe lighting conditions that create mood and realism:
        - Natural light sources (e.g., "soft morning sunlight streaming through windows", "golden hour warm glow")
        - Artificial lighting (e.g., "warm LED under-cabinet lighting", "pendant lights casting gentle shadows")
        - Light quality (e.g., "diffused natural light", "dramatic side lighting", "even ambient illumination")
        - Shadows and highlights (e.g., "subtle shadows adding depth", "highlights on polished surfaces")
        3. **CAMERA (C)** - Include professional photography specifications:
        - Camera type: "shot on professional DSLR" or "architectural photography camera"
        - Resolution: "8K resolution", "ultra high definition", "HDR"
        - Perspective: specific angle (e.g., "wide-angle lens from doorway", "eye-level perspective", "slightly elevated view")
        - Depth of field: "sharp focus throughout" or "shallow depth of field with background blur"
        - Quality keywords: "professional interior design photography", "magazine quality", "architectural digest style"
        **Additional Requirements:**
        - Maintain existing spatial layout and dimensions exactly as described
        - Include specific color names and codes when mentioned
        - Add atmospheric details (e.g., "clean, inviting atmosphere", "modern luxury feel")
        - Specify the aspect ratio: {inputs.aspect_ratio}
        **Output Format:** Create a single, flowing paragraph that reads like a professional photography brief.
        Start with the camera/technical specs, then describe the subject with rich detail, then lighting conditions.
        Include keywords: "photorealistic", "8K", "HDR", "professional interior photography", "architectural photography".
        Emphasize that the layout must remain unchanged - only surface finishes are modified.
        """
        if reference_images:
            base_rewrite_prompt += "\n\n**Reference Image Layout:** The reference image shows the EXACT layout that must be preserved. Match the camera angle, room structure, window/door positions, and furniture/appliance placement EXACTLY. Only change the surface finishes and colors. Analyze the lighting in the reference image and replicate it."
        # First pass: ask the text model to rewrite the user's description into
        # a professional photography brief before generating the image.
        rewritten_prompt_response = client.models.generate_content(
            model="gemini-3-pro-preview",
            contents=base_rewrite_prompt
        )
        rewritten_prompt = rewritten_prompt_response.text
        logger.info(f"Enhanced prompt: {rewritten_prompt}")
        model = "gemini-3-pro-image-preview"
        # Build content parts: rewritten prompt first, then any reference images.
        content_parts = [types.Part.from_text(text=rewritten_prompt)]
        content_parts.extend(reference_images)
        contents = [
            types.Content(
                role="user",
                parts=content_parts,
            ),
        ]
        generate_content_config = types.GenerateContentConfig(
            response_modalities=[
                "IMAGE",
                "TEXT",
            ],
        )
        # Generate versioned filename before the call so logs and the saved
        # artifact agree on the name.
        version = get_next_version_number(tool_context, inputs.asset_name)
        artifact_filename = create_versioned_filename(inputs.asset_name, version)
        logger.info(f"Generating rendering with artifact filename: {artifact_filename} (version {version})")
        # Generate the image; stream chunks until one carries inline image data.
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            if (
                chunk.candidates is None
                or chunk.candidates[0].content is None
                or chunk.candidates[0].content.parts is None
            ):
                continue
            if chunk.candidates[0].content.parts[0].inline_data and chunk.candidates[0].content.parts[0].inline_data.data:
                inline_data = chunk.candidates[0].content.parts[0].inline_data
                # Create a Part object from the inline data
                # The inline_data already contains the mime_type from the API response
                image_part = types.Part(inline_data=inline_data)
                try:
                    # Save the image as an artifact; save_artifact's return value
                    # is reused as the authoritative version number below.
                    version = await tool_context.save_artifact(
                        filename=artifact_filename,
                        artifact=image_part
                    )
                    # Update version tracking
                    update_asset_version(tool_context, inputs.asset_name, version, artifact_filename)
                    # Store in session state so edit_renovation_rendering can
                    # default to this rendering.
                    tool_context.state["last_generated_rendering"] = artifact_filename
                    tool_context.state["current_asset_name"] = inputs.asset_name
                    logger.info(f"Saved rendering as artifact '{artifact_filename}' (version {version})")
                    return f"✅ Renovation rendering generated successfully!\n\nThe rendering has been saved and is available in the artifacts panel. Artifact name: {inputs.asset_name} (version {version}).\n\nNote: The image is stored as an artifact and can be accessed through the session artifacts, not as a direct image link."
                except Exception as e:
                    logger.error(f"Error saving artifact: {e}")
                    return f"Error saving rendering as artifact: {e}"
            else:
                # Log any text responses
                if hasattr(chunk, 'text') and chunk.text:
                    logger.info(f"Model response: {chunk.text}")
        # Stream ended without any image chunk.
        return "No rendering was generated. Please try again with a more detailed prompt."
    except Exception as e:
        logger.error(f"Error in generate_renovation_rendering: {e}")
        return f"An error occurred while generating the rendering: {e}"
# ============================================================================
# Image Editing Tool
# ============================================================================
async def edit_renovation_rendering(tool_context: ToolContext, inputs: EditRenovationRenderingInput) -> str:
    """
    Edits an existing renovation rendering based on feedback or refinements.

    This tool allows iterative improvements to the rendered image, such as
    changing colors, materials, lighting, or layout elements.

    Args:
        tool_context: Tool context providing session state and artifact storage.
        inputs: Edit parameters (source artifact, change prompt, optional asset
            name and reference image). May arrive as a plain dict.

    Returns:
        A human-readable status message naming the new artifact version on
        success, or describing the failure.

    Raises:
        ValueError: If neither GEMINI_API_KEY nor GOOGLE_API_KEY is set.
    """
    if "GEMINI_API_KEY" not in os.environ and "GOOGLE_API_KEY" not in os.environ:
        raise ValueError("GEMINI_API_KEY or GOOGLE_API_KEY environment variable not set.")
    logger.info("Starting renovation rendering edit")
    try:
        client = genai.Client()
        # Handle inputs that might come as dict instead of Pydantic model
        if isinstance(inputs, dict):
            inputs = EditRenovationRenderingInput(**inputs)
        # Get artifact_filename from session state if not provided
        artifact_filename = inputs.artifact_filename
        if not artifact_filename:
            artifact_filename = tool_context.state.get("last_generated_rendering")
            if not artifact_filename:
                return "❌ No artifact_filename provided and no previous rendering found in session. Please generate a rendering first using generate_renovation_rendering."
            logger.info(f"Using last generated rendering from session: {artifact_filename}")
        # Validate filename format - check for common hallucination patterns
        # (the model sometimes invents a nonexistent v0).
        if "_v0." in artifact_filename:
            # Version 0 doesn't exist - the first version is always v1
            logger.warning(f"Invalid version v0 detected in filename: {artifact_filename}")
            corrected_filename = artifact_filename.replace("_v0.", "_v1.")
            logger.info(f"Attempting corrected filename: {corrected_filename}")
            artifact_filename = corrected_filename
        # Load the existing rendering
        logger.info(f"Loading artifact: {artifact_filename}")
        loaded_image_part = None
        try:
            loaded_image_part = await tool_context.load_artifact(artifact_filename)
        except Exception as e:
            logger.error(f"Error loading artifact: {e}")
        # Fallback 1: if loading failed, try the most recent known version of
        # the same asset recorded in session state.
        if not loaded_image_part:
            # Extract base asset name and try to find any existing version
            base_name = artifact_filename.split('_v')[0] if '_v' in artifact_filename else artifact_filename.replace('.png', '')
            asset_filenames = tool_context.state.get("asset_filenames", {})
            # Check if we have any version of this asset
            if base_name in asset_filenames:
                fallback_filename = asset_filenames[base_name]
                logger.info(f"Attempting fallback to known artifact: {fallback_filename}")
                try:
                    loaded_image_part = await tool_context.load_artifact(fallback_filename)
                    if loaded_image_part:
                        artifact_filename = fallback_filename
                        logger.info(f"Successfully loaded fallback artifact: {fallback_filename}")
                except Exception as e:
                    logger.error(f"Fallback load also failed: {e}")
        # Fallback 2 (last resort): the last generated rendering, if different.
        if not loaded_image_part:
            last_rendering = tool_context.state.get("last_generated_rendering")
            if last_rendering and last_rendering != artifact_filename:
                logger.info(f"Attempting last resort fallback to: {last_rendering}")
                try:
                    loaded_image_part = await tool_context.load_artifact(last_rendering)
                    if loaded_image_part:
                        artifact_filename = last_rendering
                        logger.info(f"Successfully loaded last resort artifact: {last_rendering}")
                except Exception as e:
                    logger.error(f"Last resort load also failed: {e}")
        if not loaded_image_part:
            available_renderings = get_asset_versions_info(tool_context)
            return f"❌ Could not find rendering artifact: {inputs.artifact_filename}\n\n{available_renderings}\n\nPlease use one of the available artifact filenames, or generate a new rendering first."
        # Handle reference image if specified
        reference_image_part = None
        if inputs.reference_image_filename:
            # "latest" is a sentinel for the most recently uploaded reference image.
            if inputs.reference_image_filename == "latest":
                ref_filename = get_latest_reference_image_filename(tool_context)
            else:
                ref_filename = inputs.reference_image_filename
            if ref_filename:
                reference_image_part = await load_reference_image(tool_context, ref_filename)
                if reference_image_part:
                    logger.info(f"Using reference image for editing: {ref_filename}")
        model = "gemini-3-pro-image-preview"
        # Build content parts: image to edit first, then the change prompt,
        # then the optional style reference.
        content_parts = [loaded_image_part, types.Part.from_text(text=inputs.prompt)]
        if reference_image_part:
            content_parts.append(reference_image_part)
        contents = [
            types.Content(
                role="user",
                parts=content_parts,
            ),
        ]
        generate_content_config = types.GenerateContentConfig(
            response_modalities=[
                "IMAGE",
                "TEXT",
            ],
        )
        # Determine asset name (explicit input > current session asset >
        # base name parsed from the filename) and build the next version.
        if inputs.asset_name:
            asset_name = inputs.asset_name
        else:
            current_asset_name = tool_context.state.get("current_asset_name")
            if current_asset_name:
                asset_name = current_asset_name
            else:
                # Extract from filename
                base_name = artifact_filename.split('_v')[0] if '_v' in artifact_filename else "renovation_rendering"
                asset_name = base_name
        version = get_next_version_number(tool_context, asset_name)
        edited_artifact_filename = create_versioned_filename(asset_name, version)
        logger.info(f"Editing rendering with artifact filename: {edited_artifact_filename} (version {version})")
        # Edit the image; stream chunks until one carries inline image data.
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            if (
                chunk.candidates is None
                or chunk.candidates[0].content is None
                or chunk.candidates[0].content.parts is None
            ):
                continue
            if chunk.candidates[0].content.parts[0].inline_data and chunk.candidates[0].content.parts[0].inline_data.data:
                inline_data = chunk.candidates[0].content.parts[0].inline_data
                # Create a Part object from the inline data
                # The inline_data already contains the mime_type from the API response
                edited_image_part = types.Part(inline_data=inline_data)
                try:
                    # Save the edited image as an artifact; save_artifact's
                    # return value is the authoritative version number.
                    version = await tool_context.save_artifact(
                        filename=edited_artifact_filename,
                        artifact=edited_image_part
                    )
                    # Update version tracking
                    update_asset_version(tool_context, asset_name, version, edited_artifact_filename)
                    # Store in session state
                    tool_context.state["last_generated_rendering"] = edited_artifact_filename
                    tool_context.state["current_asset_name"] = asset_name
                    logger.info(f"Saved edited rendering as artifact '{edited_artifact_filename}' (version {version})")
                    return f"✅ Rendering edited successfully!\n\nThe updated rendering has been saved and is available in the artifacts panel. Artifact name: {asset_name} (version {version}).\n\nNote: The image is stored as an artifact and can be accessed through the session artifacts, not as a direct image link."
                except Exception as e:
                    logger.error(f"Error saving edited artifact: {e}")
                    return f"Error saving edited rendering as artifact: {e}"
            else:
                # Log any text responses
                if hasattr(chunk, 'text') and chunk.text:
                    logger.info(f"Model response: {chunk.text}")
        # Stream ended without any image chunk.
        return "No edited rendering was generated. Please try again."
    except Exception as e:
        logger.error(f"Error in edit_renovation_rendering: {e}")
        return f"An error occurred while editing the rendering: {e}"
# ============================================================================
# Utility Tools
# ============================================================================
async def list_renovation_renderings(tool_context: ToolContext) -> str:
    """Lists all renovation renderings created in this session.

    Thin async wrapper over get_asset_versions_info so it can be exposed
    to the agent as a callable tool.
    """
    return get_asset_versions_info(tool_context)
async def list_reference_images(tool_context: ToolContext) -> str:
    """Lists all reference images (current room photos & inspiration) uploaded in this session.

    Thin async wrapper over get_reference_images_info so it can be exposed
    to the agent as a callable tool.
    """
    return get_reference_images_info(tool_context)
async def save_uploaded_image_as_artifact(
    tool_context: ToolContext,
    image_data: str,
    artifact_name: str,
    image_type: str = "current_room"
) -> str:
    """
    Saves an uploaded image as a named artifact for later use in editing.

    This tool is used when the Visual Assessor detects an uploaded image
    and wants to make it available for the Project Coordinator to edit.

    Args:
        tool_context: The tool context
        image_data: Base64 encoded image data or image bytes
        artifact_name: Name to save the artifact as (e.g., "current_room_1", "inspiration_1")
        image_type: Type of image ("current_room" or "inspiration")

    Returns:
        Success message with the artifact filename
    """
    try:
        # Create a Part from the image data
        # Note: This assumes image_data is already in the right format
        # In practice, we'll get this from the message content
        # NOTE(review): image_data is forwarded to save_artifact as-is, unlike
        # the rendering tools which wrap bytes in types.Part — confirm the
        # artifact service accepts raw strings/bytes here.
        # Save as artifact
        await tool_context.save_artifact(
            filename=artifact_name,
            artifact=image_data
        )
        # Track in state
        if "uploaded_images" not in tool_context.state:
            tool_context.state["uploaded_images"] = {}
        tool_context.state["uploaded_images"][artifact_name] = {
            "type": image_type,
            "filename": artifact_name
        }
        # Remember the most recent upload of each type for quick lookup.
        if image_type == "current_room":
            tool_context.state["current_room_artifact"] = artifact_name
        elif image_type == "inspiration":
            tool_context.state["inspiration_artifact"] = artifact_name
        logger.info(f"Saved uploaded image as artifact: {artifact_name}")
        return f"✅ Image saved as artifact: {artifact_name} (type: {image_type}). This can now be used for editing."
    except Exception as e:
        logger.error(f"Error saving uploaded image: {e}")
        return f"❌ Error saving uploaded image: {e}"
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/ai_home_renovation_agent/tools.py",
"license": "Apache License 2.0",
"lines": 436,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:rag_tutorials/contextualai_rag_agent/contextualai_rag_agent.py | import os
import tempfile
import time
from typing import List, Optional, Tuple, Any
import streamlit as st
import requests
import json
import re
from contextual import ContextualAI
def init_session_state() -> None:
    """Seed Streamlit session state with the app's defaults (idempotent)."""
    defaults = {
        "api_key_submitted": False,
        "contextual_api_key": "",
        "base_url": "https://api.contextual.ai/v1",
        "agent_id": "",
        "datastore_id": "",
        "chat_history": [],
        "processed_file": False,
        "last_raw_response": None,
        "last_user_query": "",
    }
    # Only fill keys that are absent so reruns never clobber user state.
    for key, default_value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = default_value
def sidebar_api_form() -> bool:
    """Render the sidebar credential form.

    Returns True once working credentials are stored in session state;
    otherwise renders the form (verifying on submit) and returns False.
    """
    with st.sidebar:
        st.header("API & Resource Setup")
        if st.session_state.api_key_submitted:
            st.success("API verified")
            # Clearing all state forces the whole setup flow to run again.
            if st.button("Reset Setup"):
                st.session_state.clear()
                st.rerun()
            return True
        with st.form("contextual_api_form"):
            api_key = st.text_input("Contextual AI API Key", type="password")
            base_url = st.text_input(
                "Base URL",
                value=st.session_state.base_url,
                help="Include /v1 (e.g., https://api.contextual.ai/v1)",
            )
            existing_agent_id = st.text_input("Existing Agent ID (optional)")
            existing_datastore_id = st.text_input("Existing Datastore ID (optional)")
            if st.form_submit_button("Save & Verify"):
                try:
                    # A cheap list call verifies both the key and the base URL.
                    client = ContextualAI(api_key=api_key, base_url=base_url)
                    _ = client.agents.list()
                    st.session_state.contextual_api_key = api_key
                    st.session_state.base_url = base_url
                    st.session_state.agent_id = existing_agent_id
                    st.session_state.datastore_id = existing_datastore_id
                    st.session_state.api_key_submitted = True
                    st.success("Credentials verified!")
                    st.rerun()
                except Exception as e:
                    st.error(f"Credential verification failed: {str(e)}")
    return False
def ensure_client():
    """Return a ContextualAI client built from the credentials in session state."""
    api_key = st.session_state.get("contextual_api_key")
    if not api_key:
        raise ValueError("Contextual AI API key not provided")
    return ContextualAI(api_key=api_key, base_url=st.session_state.base_url)
def create_datastore(client, name: str) -> Optional[str]:
    """Create a datastore and return its id; on failure show a UI error and return None."""
    try:
        created = client.datastores.create(name=name)
        return getattr(created, "id", None)
    except Exception as e:
        st.error(f"Failed to create datastore: {e}")
        return None
ALLOWED_EXTS = {".pdf", ".html", ".htm", ".mhtml", ".doc", ".docx", ".ppt", ".pptx"}

def upload_documents(client, datastore_id: str, files: List[bytes], filenames: List[str], metadata: Optional[dict]) -> List[str]:
    """Ingest each (content, filename) pair into the datastore.

    Args:
        client: ContextualAI client.
        datastore_id: Target datastore id.
        files: Raw file contents, parallel to ``filenames``.
        filenames: Original names; the extension must be in ALLOWED_EXTS.
        metadata: Optional ingestion metadata applied to every document.

    Returns:
        Ids of the successfully ingested documents; failures are reported via
        the UI and skipped.
    """
    doc_ids: List[str] = []
    for content, fname in zip(files, filenames):
        # Fix: always define tmp_path so the cleanup in ``finally`` can never
        # hit an unbound name (the unsupported-extension ``continue`` runs the
        # finally block before any temp file exists) or unlink a stale path
        # left over from a previous iteration.
        tmp_path = None
        try:
            ext = os.path.splitext(fname)[1].lower()
            if ext not in ALLOWED_EXTS:
                st.error(f"Unsupported file extension for {fname}. Allowed: {sorted(ALLOWED_EXTS)}")
                continue
            with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as tmp:
                tmp.write(content)
                tmp_path = tmp.name
            with open(tmp_path, "rb") as f:
                if metadata:
                    result = client.datastores.documents.ingest(datastore_id, file=f, metadata=metadata)
                else:
                    result = client.datastores.documents.ingest(datastore_id, file=f)
            doc_ids.append(getattr(result, "id", ""))
        except Exception as e:
            st.error(f"Failed to upload {fname}: {e}")
        finally:
            if tmp_path is not None:
                try:
                    os.unlink(tmp_path)
                except OSError:
                    pass
    return doc_ids
def wait_until_documents_ready(api_key: str, datastore_id: str, base_url: str, max_checks: int = 30, interval_sec: float = 5.0) -> None:
    """Poll the datastore's document list until nothing is still processing.

    Gives up silently after ``max_checks`` polls; network errors simply
    trigger another wait-and-retry cycle.
    """
    endpoint = f"{base_url.rstrip('/')}/datastores/{datastore_id}/documents"
    auth_header = {"Authorization": f"Bearer {api_key}"}
    remaining = max_checks
    while remaining > 0:
        remaining -= 1
        try:
            response = requests.get(endpoint, headers=auth_header, timeout=30)
            if response.status_code == 200:
                documents = response.json().get("documents", [])
                still_busy = any(d.get("status") in ("processing", "pending") for d in documents)
                if not still_busy:
                    return
            time.sleep(interval_sec)
        except Exception:
            time.sleep(interval_sec)
def create_agent(client, name: str, description: str, datastore_id: str) -> Optional[str]:
    """Create an agent bound to the given datastore; return its id or None on failure."""
    try:
        agent = client.agents.create(name=name, description=description, datastore_ids=[datastore_id])
        return getattr(agent, "id", None)
    except Exception as e:
        st.error(f"Failed to create agent: {e}")
        return None
def query_agent(client, agent_id: str, query: str) -> Tuple[str, Any]:
    """Send one user message to the agent and return ``(answer_text, raw_response)``.

    The SDK response shape varies, so the answer text is extracted from
    ``.content``, ``.message.content``, or the last entry of ``.messages``,
    in that order; failing all of those the stringified response is used.
    On any exception the error text and None are returned.
    """
    try:
        raw = client.agents.query.create(agent_id=agent_id, messages=[{"role": "user", "content": query}])
        if hasattr(raw, "content"):
            return raw.content, raw
        if hasattr(raw, "message") and hasattr(raw.message, "content"):
            return raw.message.content, raw
        if hasattr(raw, "messages") and raw.messages:
            final = raw.messages[-1]
            return getattr(final, "content", str(final)), raw
        return str(raw), raw
    except Exception as e:
        return f"Error querying agent: {e}", None
def show_retrieval_info(client, raw_response, agent_id: str) -> None:
    """Display the page image backing the top retrieval for the last answer.

    Walks message_id -> retrieval_contents -> content metadata and renders
    the first attributed page; informs the user at each step when the
    required metadata is missing.
    """
    try:
        if not raw_response:
            st.info("No retrieval info available.")
            return
        message_id = getattr(raw_response, "message_id", None)
        retrieval_contents = getattr(raw_response, "retrieval_contents", [])
        if not message_id or not retrieval_contents:
            st.info("No retrieval metadata returned.")
            return
        # Only the top-ranked retrieval is shown.
        first_content_id = getattr(retrieval_contents[0], "content_id", None)
        if not first_content_id:
            st.info("Missing content_id in retrieval metadata.")
            return
        ret_result = client.agents.query.retrieval_info(message_id=message_id, agent_id=agent_id, content_ids=[first_content_id])
        metadatas = getattr(ret_result, "content_metadatas", [])
        if not metadatas:
            st.info("No content metadatas found.")
            return
        page_img_b64 = getattr(metadatas[0], "page_img", None)
        if not page_img_b64:
            st.info("No page image provided in metadata.")
            return
        # The page image arrives base64-encoded; decode before rendering.
        import base64
        img_bytes = base64.b64decode(page_img_b64)
        st.image(img_bytes, caption="Top Attribution Page", use_container_width=True)
        # Removed raw object rendering to keep UI clean
    except Exception as e:
        st.error(f"Failed to load retrieval info: {e}")
def update_agent_prompt(client, agent_id: str, system_prompt: str) -> bool:
    """Apply a new system prompt to the agent; report failure via the UI."""
    try:
        client.agents.update(agent_id=agent_id, system_prompt=system_prompt)
    except Exception as e:
        st.error(f"Failed to update system prompt: {e}")
        return False
    return True
def evaluate_with_lmunit(client, query: str, response_text: str, unit_test: str):
    """Run an LMUnit rubric against a (query, response) pair and render the verdict."""
    try:
        verdict = client.lmunit.create(query=query, response=response_text, unit_test=unit_test)
        st.subheader("Evaluation Result")
        st.code(str(verdict), language="json")
    except Exception as e:
        st.error(f"LMUnit evaluation failed: {e}")
def post_process_answer(text: str) -> str:
    """Normalize agent output: drop empty parentheses, turn bullets into Markdown dashes."""
    without_empty_parens = re.sub(r"\(\s*\)", "", text)
    return without_empty_parens.replace("• ", "\n- ")
# ---- App entry: state, credential gate, then the step-by-step RAG workflow ----
init_session_state()
st.title("Contextual AI RAG Agent")
# The whole app is gated on verified credentials from the sidebar form.
if not sidebar_api_form():
    st.info("Please enter your Contextual AI API key in the sidebar to continue.")
    st.stop()
client = ensure_client()
# Step 1: pick or create the datastore documents will be ingested into.
with st.expander("1) Create or Select Datastore", expanded=True):
    if not st.session_state.datastore_id:
        default_name = "contextualai_rag_datastore"
        ds_name = st.text_input("Datastore Name", value=default_name)
        if st.button("Create Datastore"):
            ds_id = create_datastore(client, ds_name)
            if ds_id:
                st.session_state.datastore_id = ds_id
                st.success(f"Created datastore: {ds_id}")
    else:
        st.success(f"Using Datastore: {st.session_state.datastore_id}")
# Step 2: upload documents, then block until ingestion settles.
with st.expander("2) Upload Documents", expanded=True):
    uploaded_files = st.file_uploader("Upload PDFs or text files", type=["pdf", "txt", "md"], accept_multiple_files=True)
    metadata_json = st.text_area("Custom Metadata (JSON)", value="", placeholder='{"custom_metadata": {"field1": "value1"}}')
    if uploaded_files and st.session_state.datastore_id:
        contents = [f.getvalue() for f in uploaded_files]
        names = [f.name for f in uploaded_files]
        if st.button("Ingest Documents"):
            # Metadata is optional; invalid JSON is reported and ignored.
            parsed_metadata = None
            if metadata_json.strip():
                try:
                    parsed_metadata = json.loads(metadata_json)
                except Exception as e:
                    st.error(f"Invalid metadata JSON: {e}")
                    parsed_metadata = None
            ids = upload_documents(client, st.session_state.datastore_id, contents, names, parsed_metadata)
            if ids:
                st.success(f"Uploaded {len(ids)} document(s)")
                wait_until_documents_ready(st.session_state.contextual_api_key, st.session_state.datastore_id, st.session_state.base_url)
                st.info("Documents are ready.")
# Step 3: create an agent over the datastore (or reuse an existing one).
with st.expander("3) Create or Select Agent", expanded=True):
    if not st.session_state.agent_id and st.session_state.datastore_id:
        agent_name = st.text_input("Agent Name", value="ContextualAI RAG Agent")
        agent_desc = st.text_area("Agent Description", value="RAG agent over uploaded documents")
        if st.button("Create Agent"):
            a_id = create_agent(client, agent_name, agent_desc, st.session_state.datastore_id)
            if a_id:
                st.session_state.agent_id = a_id
                st.success(f"Created agent: {a_id}")
    elif st.session_state.agent_id:
        st.success(f"Using Agent: {st.session_state.agent_id}")
# Step 4 (optional): replace the agent's system prompt.
with st.expander("4) Agent Settings (Optional)"):
    if st.session_state.agent_id:
        system_prompt_val = st.text_area("System Prompt", value="", placeholder="Paste a new system prompt to update your agent")
        if st.button("Update System Prompt") and system_prompt_val.strip():
            ok = update_agent_prompt(client, st.session_state.agent_id, system_prompt_val.strip())
            if ok:
                st.success("System prompt updated.")
st.divider()
# Replay the conversation so far, then handle a newly submitted question.
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
query = st.chat_input("Ask a question about your documents")
if query:
    st.session_state.last_user_query = query
    st.session_state.chat_history.append({"role": "user", "content": query})
    with st.chat_message("user"):
        st.markdown(query)
    if st.session_state.agent_id:
        with st.chat_message("assistant"):
            answer, raw = query_agent(client, st.session_state.agent_id, query)
            # Raw response is kept for the retrieval-info debug view below.
            st.session_state.last_raw_response = raw
            processed = post_process_answer(answer)
            st.markdown(processed)
            st.session_state.chat_history.append({"role": "assistant", "content": processed})
    else:
        st.error("Please create or select an agent first.")
# Debug tools: retrieval attribution viewer plus LMUnit answer evaluation.
with st.expander("Debug & Evaluation", expanded=False):
    st.caption("Tools to inspect retrievals and evaluate answers")
    if st.session_state.agent_id:
        if st.checkbox("Show Retrieval Info", value=False):
            show_retrieval_info(client, st.session_state.last_raw_response, st.session_state.agent_id)
        st.markdown("")
        unit_test = st.text_area("LMUnit rubric / unit test", value="Does the response avoid unnecessary information?", height=80)
        if st.button("Evaluate Last Answer with LMUnit"):
            if st.session_state.last_user_query and st.session_state.chat_history:
                # Evaluate the most recent assistant message against the rubric.
                last_assistant_msgs = [m for m in st.session_state.chat_history if m["role"] == "assistant"]
                if last_assistant_msgs:
                    evaluate_with_lmunit(client, st.session_state.last_user_query, last_assistant_msgs[-1]["content"], unit_test)
                else:
                    st.info("No assistant response to evaluate yet.")
            else:
                st.info("Ask a question first to run an evaluation.")
# Sidebar footer: quick reset controls.
with st.sidebar:
    st.divider()
    col1, col2 = st.columns(2)
    with col1:
        if st.button("Clear Chat"):
            st.session_state.chat_history = []
            st.session_state.last_raw_response = None
            st.session_state.last_user_query = ""
            st.rerun()
    with col2:
        if st.button("Reset App"):
            st.session_state.clear()
            st.rerun()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "rag_tutorials/contextualai_rag_agent/contextualai_rag_agent.py",
"license": "Apache License 2.0",
"lines": 279,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:rag_tutorials/agentic_rag_embedding_gemma/agentic_rag_embeddinggemma.py | import streamlit as st
from agno.agent import Agent
from agno.knowledge.embedder.ollama import OllamaEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.ollama import Ollama
from agno.vectordb.lancedb import LanceDb, SearchType
# Page configuration — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Agentic RAG with Google's EmbeddingGemma",
    page_icon="🔥",
    layout="wide"
)
@st.cache_resource
def load_knowledge_base():
    """Build the LanceDB-backed knowledge base once per server process.

    Embeddings come from the local EmbeddingGemma model served by Ollama
    (768-dimensional vectors); st.cache_resource keeps a single shared
    instance across reruns.
    """
    vector_store = LanceDb(
        table_name="recipes",
        uri="tmp/lancedb",
        search_type=SearchType.vector,
        embedder=OllamaEmbedder(id="embeddinggemma:latest", dimensions=768),
    )
    return Knowledge(vector_db=vector_store)
# Initialize URLs in session state
# `urls` is the user-added list; `urls_loaded` tracks which of those have
# already been ingested, so Streamlit reruns don't re-index the same content.
if 'urls' not in st.session_state:
    st.session_state.urls = []
if 'urls_loaded' not in st.session_state:
    st.session_state.urls_loaded = set()
kb = load_knowledge_base()
# Load initial URLs if any (only load once per URL)
for url in st.session_state.urls:
    if url not in st.session_state.urls_loaded:
        kb.add_content(url=url)
        st.session_state.urls_loaded.add(url)
# Agent wired to the knowledge base; generation runs locally on Ollama's llama3.2.
agent = Agent(
    model=Ollama(id="llama3.2:latest"),
    knowledge=kb,
    instructions=[
        "Search the knowledge base for relevant information and base your answers on it.",
        "Be clear, and generate well-structured answers.",
        "Use clear headings, bullet points, or numbered lists where appropriate.",
    ],
    search_knowledge=True,  # agent may query the KB as a tool
    debug_mode=False,
    markdown=True,  # responses rendered as Markdown in the UI
)
# Sidebar for adding knowledge sources
with st.sidebar:
    # Logo row: Google (EmbeddingGemma), Ollama (runtime), Agno (framework).
    col1, col2, col3 = st.columns(3)
    with col1:
        st.image("google.png")
    with col2:
        st.image("ollama.png")
    with col3:
        st.image("agno.png")
    st.header("🌐 Add Knowledge Sources")
    new_url = st.text_input(
        "Add URL",
        placeholder="https://example.com/sample.pdf",
        help="Enter a PDF URL to add to the knowledge base",
    )
    if st.button("➕ Add URL", type="primary"):
        if new_url:
            if new_url not in st.session_state.urls:
                st.session_state.urls.append(new_url)
                # Ingest immediately and mark loaded so the top-level loader
                # doesn't process this URL again on the next rerun.
                with st.spinner("📥 Adding new URL..."):
                    kb.add_content(url=new_url)
                    st.session_state.urls_loaded.add(new_url)
                st.success(f"✅ Added: {new_url}")
                st.rerun()
            else:
                st.warning("This URL has already been added.")
        else:
            st.error("Please enter a URL")
    # Display current URLs
    if st.session_state.urls:
        st.subheader("📚 Current Knowledge Sources")
        for i, url in enumerate(st.session_state.urls, 1):
            st.markdown(f"{i}. {url}")
# Main title and description
st.title("🔥 Agentic RAG with EmbeddingGemma (100% local)")
st.markdown(
    """
    This app demonstrates an agentic RAG system using local models via [Ollama](https://ollama.com/):
    - **EmbeddingGemma** for creating vector embeddings
    - **LanceDB** as the local vector database
    Add PDF URLs in the sidebar to start and ask questions about the content.
    """
)
query = st.text_input("Enter your question:")
# Simple answer generation
if st.button("🚀 Get Answer", type="primary"):
    if not query:
        st.error("Please enter a question")
    else:
        st.markdown("### 💡 Answer")
        with st.spinner("🔍 Searching knowledge and generating answer..."):
            try:
                # Stream the agent's answer chunk-by-chunk into one placeholder,
                # re-rendering the accumulated text on each chunk.
                response = ""
                resp_container = st.empty()
                gen = agent.run(query, stream=True)
                for resp_chunk in gen:
                    # Display response
                    if resp_chunk.content is not None:
                        response += resp_chunk.content
                        resp_container.markdown(response)
            except Exception as e:
                st.error(f"Error: {e}")
with st.expander("📖 How This Works"):
    st.markdown(
        """
        **This app uses the Agno framework to create an intelligent Q&A system:**
        1. **Knowledge Loading**: PDF URLs are processed and stored in LanceDB vector database
        2. **EmbeddingGemma as Embedder**: EmbeddingGemma generates local embeddings for semantic search
        3. **Llama 3.2**: The Llama 3.2 model generates answers based on retrieved context
        **Key Components:**
        - `EmbeddingGemma` as the embedder
        - `LanceDB` as the vector database
        - `Knowledge`: Manages document loading from PDF URLs
        - `OllamaEmbedder`: Uses EmbeddingGemma for embeddings
        - `Agno Agent`: Orchestrates everything to answer questions
        """
    )
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "rag_tutorials/agentic_rag_embedding_gemma/agentic_rag_embeddinggemma.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/resume_job_matcher/app.py | import streamlit as st
import requests
import fitz # PyMuPDF for PDF parsing
st.set_page_config(page_title="📄 Resume & Job Matcher", layout="centered")
st.title("📄 Resume & Job Matcher")
st.sidebar.info("""
This app uses a local LLM via **Ollama**.
1. Install Ollama: https://ollama.ai
2. Verify the ollama CLI works, by running the below commands in your terminal:
2.1. Start the Ollama server: `ollama serve` on separate terminal.
2.2. Run a model (e.g., `ollama pull llama3`).
2.3. Verify local LLM llama is listed using `ollama list`.
2.4. Run the streamlit run app.py command to start this app in another terminal.
3. Upload a Resume + Job Description to get a fit score and suggestions.
""")
# Helper: Extract text from PDF
def extract_pdf_text(file):
    """Return the concatenated text of every page of an uploaded PDF.

    Args:
        file: Binary file-like object (e.g. a Streamlit UploadedFile);
            its entire contents are read into memory.

    Returns:
        str: All page text, in page order.
    """
    with fitz.open(stream=file.read(), filetype="pdf") as doc:
        # join() instead of `text += ...`: avoids quadratic string rebuilds
        # on large PDFs while producing the identical result.
        return "".join(page.get_text() for page in doc)
def get_text_from_file(file_name) -> str:
    """Extract text from an uploaded file.

    PDFs are parsed with PyMuPDF; anything else is decoded as UTF-8 text.
    (`file_name` is actually an uploaded file object exposing `.type`/`.read()`.)
    """
    if file_name.type == "application/pdf":
        return extract_pdf_text(file_name)
    return file_name.read().decode("utf-8")
# File uploaders
resume_file = st.file_uploader("Upload Resume (PDF/TXT)", type=["pdf", "txt"])
job_file = st.file_uploader("Upload Job Description (PDF/TXT)", type=["pdf", "txt"])
if st.button("🔍 Match Resume with Job Description"):
    if resume_file and job_file:
        # Extract Resume text
        resume_text = get_text_from_file(resume_file)
        # Extract Job Description text
        job_text = get_text_from_file(job_file)
        # Prompt
        # Both documents are inlined verbatim into one instruction for llama3.
        prompt = f"""
You are an AI career assistant.
Resume:
{resume_text}
Job Description:
{job_text}
Please analyze and return:
1. A **Fit Score** (0-100%) of how well this resume matches the job.
2. Key strengths (resume areas that align well).
3. Specific recommendations to improve the resume to better fit the job.
Format neatly in Markdown.
"""
        try:
            with st.spinner("⏳ Analyzing Resume vs Job Description..."):
                # Blocking (non-streaming) call to the local Ollama HTTP API.
                response = requests.post(
                    "http://localhost:11434/api/generate",
                    json={"model": "llama3", "prompt": prompt, "stream": False},
                )
                data = response.json()
                output = data.get("response", "⚠️ No response from model.")
                # Show Results
                st.subheader("📌 Match Analysis")
                st.markdown(output)
                # Save in session for download
                st.session_state["resume_match"] = output
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
    else:
        st.warning("⚠️ Please upload both Resume and Job Description.")
# Download button
# Rendered on every rerun once a report exists in session state.
if "resume_match" in st.session_state:
    st.download_button(
        "💾 Download Match Report",
        st.session_state["resume_match"],
        file_name="resume_match_report.md",
        mime="text/markdown"
    )
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_llm_apps/resume_job_matcher/app.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/llm_finetuning_tutorials/gemma3_finetuning/finetune_gemma3.py | import torch
from unsloth import FastModel # Unsloth fast loader + training utils
from unsloth.chat_templates import get_chat_template, standardize_sharegpt
from datasets import load_dataset # Hugging Face datasets
from trl import SFTTrainer # Supervised fine-tuning trainer
from transformers import TrainingArguments # Training hyperparameters
# Minimal config (GPU expected). Adjust sizes: 270m, 1b, 4b, 12b, 27b
MODEL_NAME = "unsloth/gemma-3-270m-it"
MAX_SEQ_LEN = 2048
LOAD_IN_4BIT = True # 4-bit quantized loading for low VRAM
LOAD_IN_8BIT = False # 8-bit quantized loading for low VRAM
FULL_FINETUNING = False # LoRA adapters (efficient) instead of full FT
def load_model_and_tokenizer():
    """Load Gemma 3 and its tokenizer, optionally attaching LoRA adapters.

    Returns:
        (model, tokenizer): the (possibly PEFT-wrapped) model, and a tokenizer
        configured with the Gemma 3 chat template.
    """
    # Load Gemma 3 + tokenizer with desired context/quantization
    model, tokenizer = FastModel.from_pretrained(
        model_name=MODEL_NAME,
        max_seq_length=MAX_SEQ_LEN,
        load_in_4bit=LOAD_IN_4BIT,
        load_in_8bit=LOAD_IN_8BIT,
        full_finetuning=FULL_FINETUNING,
    )
    if not FULL_FINETUNING:
        # Add LoRA adapters on attention/MLP projections (PEFT)
        model = FastModel.get_peft_model(
            model,
            r=16,  # LoRA rank: capacity vs. VRAM trade-off
            target_modules=[
                "q_proj", "k_proj", "v_proj", "o_proj",
                "gate_proj", "up_proj", "down_proj",
            ],
        )
    # Apply Gemma 3 chat template for correct conversation formatting
    tokenizer = get_chat_template(tokenizer, chat_template="gemma-3")
    return model, tokenizer
def prepare_dataset(tokenizer):
    """Load FineTome-100k and render each conversation to one training string."""

    def render_batch(batch):
        # Apply the chat template to every conversation in the batch.
        rendered = [
            tokenizer.apply_chat_template(conv, tokenize=False)
            for conv in batch["conversations"]
        ]
        return {"text": rendered}

    # ShareGPT-style conversations, normalized to a uniform schema first.
    raw = load_dataset("mlabonne/FineTome-100k", split="train")
    return standardize_sharegpt(raw).map(render_batch, batched=True)
def train(model, dataset):
    """Run a short supervised fine-tuning pass over `dataset`.

    Args:
        model: Model returned by load_model_and_tokenizer (LoRA-wrapped unless
            FULL_FINETUNING is set).
        dataset: Dataset with a "text" column of rendered conversations.
    """
    # Choose precision based on CUDA capabilities
    use_bf16 = torch.cuda.is_available() and torch.cuda.is_bf16_supported()
    use_fp16 = torch.cuda.is_available() and not use_bf16  # fp16 fallback
    trainer = SFTTrainer(
        model=model,
        train_dataset=dataset,
        dataset_text_field="text",
        max_seq_length=MAX_SEQ_LEN,
        args=TrainingArguments(
            per_device_train_batch_size=2,
            gradient_accumulation_steps=4,  # effective batch size = 8
            warmup_steps=5,
            max_steps=60,  # demo-length run; increase for real training
            learning_rate=2e-4,
            bf16=use_bf16,
            fp16=use_fp16,
            logging_steps=1,
            output_dir="outputs",
        ),
    )
    trainer.train()
def main():
    """End-to-end fine-tuning entry point: load, prepare, train, save."""
    model, tokenizer = load_model_and_tokenizer()  # quantized Gemma 3 + chat tokenizer
    training_data = prepare_dataset(tokenizer)     # rendered conversation strings
    train(model, training_data)                    # short LoRA SFT run
    model.save_pretrained("finetuned_model")       # persist weights/adapters locally


if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_llm_apps/llm_finetuning_tutorials/gemma3_finetuning/finetune_gemma3.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_1_default_tracing/agent.py | from agents import Agent, Runner
import asyncio
# Create agent for tracing demonstrations
root_agent = Agent(
name="Tracing Demo Agent",
instructions="""
You are a helpful assistant demonstrating tracing capabilities.
Respond concisely but perform actions that generate interesting trace data.
"""
)
# Example 1: Basic automatic tracing
async def basic_automatic_tracing():
    """Demonstrates default tracing that happens automatically"""
    print("=== Basic Automatic Tracing ===")
    print("Tracing is enabled by default - no setup required!")
    print("View traces at: https://platform.openai.com/traces")
    # Single agent run - creates one trace automatically
    result = await Runner.run(
        root_agent,
        "Explain what tracing means in software development."
    )
    print(f"Response: {result.final_output}")
    # NOTE(review): confirm the installed Agents SDK's RunResult exposes a
    # `run_id` attribute — on some SDK versions this line may AttributeError.
    print(f"Trace ID: {result.run_id}")  # Each run gets a unique ID
    print("Check the OpenAI Traces dashboard to see this execution!")
    return result
# Example 2: Multiple runs create separate traces
async def multiple_separate_traces():
    """Demonstrate that each Runner.run() invocation yields its own trace."""
    print("\n=== Multiple Separate Traces ===")
    print("Each Runner.run() call creates a separate trace")
    # Two independent runs — the SDK records a distinct trace for each.
    first = await Runner.run(
        root_agent,
        "What are the benefits of monitoring software?"
    )
    print(f"Trace 1 ID: {first.run_id}")
    second = await Runner.run(
        root_agent,
        "How do you debug performance issues?"
    )
    print(f"Trace 2 ID: {second.run_id}")
    print("Two separate traces created - each with its own workflow view")
    return first, second
# Example 3: Understanding trace structure
async def trace_structure_demo():
    """Demonstrates what gets captured in traces"""
    print("\n=== Trace Structure Demo ===")
    print("Each trace automatically captures:")
    print("• LLM generations (input/output)")
    print("• Execution time and performance")
    print("• Any errors or exceptions")
    print("• Metadata and context")
    # Create a run that will generate rich trace data
    result = await Runner.run(
        root_agent,
        "List 3 key components of observability and explain each briefly."
    )
    print(f"Response generated: {len(result.final_output)} characters")
    # NOTE(review): confirm RunResult exposes `run_id` in the SDK version in use.
    print(f"Trace contains rich data for run: {result.run_id}")
    # Show what type of information is captured
    print("\nIn the trace dashboard, you'll see:")
    print("1. Workflow timeline with duration")
    print("2. LLM generation details (model, tokens, etc.)")
    print("3. Input/output content and metadata")
    print("4. Performance metrics and execution flow")
    return result
# Example 4: Tracing configuration options
async def tracing_configuration():
    """Shows how to configure tracing behavior"""
    print("\n=== Tracing Configuration ===")
    # Example of disabling tracing for specific run
    # Imported locally so the example reads self-contained.
    from agents.run import RunConfig
    print("Running with tracing disabled...")
    result_no_trace = await Runner.run(
        root_agent,
        "This run won't be traced.",
        run_config=RunConfig(tracing_disabled=True)  # per-run opt-out
    )
    # NOTE(review): confirm RunResult exposes `run_id` in the SDK version in use.
    print(f"Run completed without tracing: {result_no_trace.run_id}")
    print("(This run won't appear in traces dashboard)")
    print("\nRunning with normal tracing...")
    result_with_trace = await Runner.run(
        root_agent,
        "This run will be traced normally."
    )
    print(f"Run completed with tracing: {result_with_trace.run_id}")
    print("(This run will appear in traces dashboard)")
    return result_no_trace, result_with_trace
# Main execution
async def main():
    """Run the four tracing demos in tutorial order."""
    print("🔍 OpenAI Agents SDK - Tracing Basics")
    print("=" * 50)
    # Basics first, configuration last — same order as the examples above.
    for demo in (
        basic_automatic_tracing,
        multiple_separate_traces,
        trace_structure_demo,
        tracing_configuration,
    ):
        await demo()
    print("\n✅ Tracing tutorial complete!")
    print("Visit https://platform.openai.com/traces to explore your traces")


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_1_default_tracing/agent.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_2_custom_tracing/agent.py | from agents import Agent, Runner, trace, custom_span
import asyncio
# Create agents for custom tracing demonstrations
research_agent = Agent(
name="Research Agent",
instructions="You are a research assistant. Provide concise, factual information."
)
analysis_agent = Agent(
name="Analysis Agent",
instructions="You analyze information and provide insights."
)
# Example 1: Custom trace for multi-step workflow
async def multi_step_workflow_trace():
    """Demonstrates grouping multiple agent runs in a single trace"""
    print("=== Multi-Step Workflow Trace ===")
    # Create custom trace that groups multiple operations
    with trace("Research and Analysis Workflow") as workflow_trace:
        print("Starting research phase...")
        # Step 1: Research
        research_result = await Runner.run(
            research_agent,
            "What are the key benefits of artificial intelligence in healthcare?"
        )
        print(f"Research complete: {len(research_result.final_output)} characters")
        # Step 2: Analysis (research output is fed into the next prompt)
        analysis_result = await Runner.run(
            analysis_agent,
            f"Analyze this research and identify the top 3 benefits: {research_result.final_output}"
        )
        print(f"Analysis complete: {len(analysis_result.final_output)} characters")
        # Step 3: Summary
        summary_result = await Runner.run(
            analysis_agent,
            f"Create a brief executive summary of these findings: {analysis_result.final_output}"
        )
        print(f"Summary complete: {len(summary_result.final_output)} characters")
    print(f"Workflow trace created: {workflow_trace.trace_id}")
    print("All three agent runs are grouped in a single trace!")
    return research_result, analysis_result, summary_result
# Example 2: Custom spans for business logic
async def custom_spans_demo():
    """Shows how to add custom spans for monitoring business logic"""
    print("\n=== Custom Spans Demo ===")
    with trace("Document Processing Workflow") as doc_trace:
        # Custom span for data preparation
        with custom_span("Data Preparation") as prep_span:
            print("Preparing data...")
            # Simulate data processing
            await asyncio.sleep(0.1)
            # NOTE(review): confirm span objects expose `add_event` in the
            # installed Agents SDK version — this helper may not exist.
            prep_span.add_event("Data loaded", {"records": 100})
            prep_span.add_event("Data validated", {"errors": 0})
        # Custom span for agent processing
        with custom_span("AI Processing") as ai_span:
            print("Processing with AI...")
            result = await Runner.run(
                research_agent,
                "Summarize the importance of data quality in AI systems."
            )
            ai_span.add_event("Processing complete", {
                "output_length": len(result.final_output),
                "model_used": "gpt-4o"
            })
        # Custom span for post-processing
        with custom_span("Post Processing") as post_span:
            print("Post-processing results...")
            await asyncio.sleep(0.1)
            post_span.add_event("Results formatted", {"format": "text"})
            post_span.add_event("Quality check passed", {"score": 0.95})
    print(f"Document processing trace: {doc_trace.trace_id}")
    print("Custom spans provide detailed workflow visibility!")
    return result
# Example 3: Hierarchical spans
async def hierarchical_spans():
    """Demonstrates nested spans for complex workflows"""
    print("\n=== Hierarchical Spans ===")
    with trace("E-commerce Order Processing") as order_trace:
        with custom_span("Order Validation") as validation_span:
            print("Validating order...")
            # Nested span for inventory check
            with custom_span("Inventory Check") as inventory_span:
                await asyncio.sleep(0.05)
                # NOTE(review): `add_event` assumed available on spans — verify
                # against the installed SDK version.
                inventory_span.add_event("Stock verified", {"available": True})
            # Nested span for payment validation
            with custom_span("Payment Validation") as payment_span:
                await asyncio.sleep(0.05)
                payment_span.add_event("Payment authorized", {"amount": 99.99})
            validation_span.add_event("Order validated", {"order_id": "ORD-12345"})
        with custom_span("AI Recommendation Generation") as rec_span:
            print("Generating recommendations...")
            result = await Runner.run(
                research_agent,
                "What are good complementary products for a wireless headset purchase?"
            )
            rec_span.add_event("Recommendations generated", {
                "count": 3,
                "confidence": 0.89
            })
        with custom_span("Order Completion") as completion_span:
            print("Completing order...")
            completion_span.add_event("Shipping scheduled", {"tracking": "TRK-789"})
            completion_span.add_event("Email sent", {"type": "confirmation"})
    print(f"E-commerce order trace: {order_trace.trace_id}")
    print("Hierarchical spans show detailed operation breakdown!")
    return result
# Example 4: Trace metadata and grouping
async def trace_metadata_demo():
    """Shows how to use trace metadata and grouping"""
    print("\n=== Trace Metadata and Grouping ===")
    # Create multiple traces with shared group ID
    conversation_id = "conv_12345"
    # First interaction in conversation
    with trace(
        "Customer Support - Initial Inquiry",
        group_id=conversation_id,  # links related traces in the dashboard
        metadata={"customer_id": "cust_789", "priority": "high"}
    ) as trace1:
        result1 = await Runner.run(
            research_agent,
            "How do I reset my password?"
        )
        # NOTE(review): confirm Trace exposes `add_event` in the installed SDK.
        trace1.add_event("Initial inquiry processed", {"category": "password_reset"})
    # Follow-up interaction in same conversation
    with trace(
        "Customer Support - Follow-up",
        group_id=conversation_id,
        metadata={"customer_id": "cust_789", "interaction": 2}
    ) as trace2:
        result2 = await Runner.run(
            analysis_agent,
            f"Based on this password reset request, what additional security measures should we recommend? Context: {result1.final_output}"
        )
        trace2.add_event("Follow-up completed", {"recommendations_provided": True})
    print(f"Conversation traces: {trace1.trace_id}, {trace2.trace_id}")
    print(f"Grouped under conversation: {conversation_id}")
    print("Metadata helps organize and filter traces in dashboard!")
    return result1, result2
# Main execution
async def main():
    """Run all four custom-tracing demos in order."""
    print("🎨 OpenAI Agents SDK - Custom Tracing")
    print("=" * 50)
    demos = (
        multi_step_workflow_trace,
        custom_spans_demo,
        hierarchical_spans,
        trace_metadata_demo,
    )
    for demo in demos:
        await demo()
    print("\n✅ Custom tracing tutorial complete!")
    print("Check the OpenAI Traces dashboard to see your custom workflow visualizations")


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_2_custom_tracing/agent.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/realtime/agent.py | import asyncio
from agents import function_tool
from agents.realtime import RealtimeAgent, RealtimeRunner, realtime_handoff
"""
Basic realtime voice agent example using OpenAI's Realtime API.
Run it via: python agent.py
This demonstrates the core realtime components from the official guide:
https://openai.github.io/openai-agents-python/realtime/guide/
Core Components:
1. RealtimeAgent - Agent with instructions, tools, and handoffs
2. RealtimeRunner - Manages configuration and sessions
3. RealtimeSession - Single conversation session
4. Event handling - Process audio, transcripts, and tool calls
"""
# Basic function tool
@function_tool
def get_weather(city: str) -> str:
    """Get current weather for a city."""
    # Canned demo response — a real tool would query a weather API here.
    print(f"[debug] get_weather called with city: {city}")
    report = f"The weather in {city} is sunny, 72°F"
    return report
@function_tool
def book_appointment(date: str, time: str, service: str) -> str:
    """Book an appointment."""
    # Demo stub: echoes the booking details back as a confirmation string.
    print(f"[debug] book_appointment called: {service} on {date} at {time}")
    confirmation = f"Appointment booked for {service} on {date} at {time}"
    return confirmation
# Specialized agent for handoffs
billing_agent = RealtimeAgent(
    name="Billing Support",
    instructions="You specialize in billing and payment issues.",
)

# Main realtime agent
agent = RealtimeAgent(
    name="Assistant",
    instructions="You are a helpful voice assistant. Keep responses brief and conversational.",
    tools=[get_weather, book_appointment],
    handoffs=[
        # Voice-to-voice transfer target; tool_description is what the model sees.
        realtime_handoff(billing_agent, tool_description="Transfer to billing support")
    ]
)
async def main():
    """Basic realtime session example"""
    print("🎙️ Basic Realtime Voice Agent")
    print("=" * 40)
    # Set up the runner with basic configuration
    runner = RealtimeRunner(
        starting_agent=agent,
        config={
            "model_settings": {
                "model_name": "gpt-4o-realtime-preview",
                "voice": "alloy",
                "modalities": ["text", "audio"],
                # Whisper transcribes the user's speech for transcript events.
                "input_audio_transcription": {
                    "model": "whisper-1"
                },
                # Server-side voice activity detection decides when a turn ends.
                "turn_detection": {
                    "type": "server_vad",
                    "threshold": 0.5,
                    "silence_duration_ms": 200
                }
            }
        }
    )
    # Start the session
    print("Starting realtime session...")
    session = await runner.run()
    print("Session started! Speak naturally - agent will respond in real-time.")
    print("Try: 'What's the weather in Paris?' or 'Book appointment tomorrow at 2pm'")
    print("Press Ctrl+C to end")
    print("-" * 40)
    # Handle session events
    async with session:
        try:
            async for event in session:
                # Handle key event types
                # NOTE(review): event type strings mirror Realtime API event
                # names — confirm they match the installed SDK's event model.
                if event.type == "response.audio_transcript.done":
                    print(f"🤖 Assistant: {event.transcript}")
                elif event.type == "conversation.item.input_audio_transcription.completed":
                    print(f"👤 User: {event.transcript}")
                elif event.type == "response.function_call_arguments.done":
                    print(f"🔧 Tool called: {event.name}")
                elif event.type == "error":
                    print(f"❌ Error: {event.error}")
                    break  # stop the event loop on a session error
        except KeyboardInterrupt:
            print("\n⏹️ Session ended")

if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/realtime/agent.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/agent.py | import asyncio
import random
import numpy as np
from agents import Agent, function_tool
from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
from agents.voice import (
AudioInput,
SingleAgentVoiceWorkflow,
SingleAgentWorkflowCallbacks,
VoicePipeline,
)
from .util import AudioPlayer, record_audio
"""
This is a simple example that uses a recorded audio buffer. Run it via:
`python -m ai_agent_framework_crash_course.openai_sdk_crash_course.11_voice.static.agent`
1. You can record an audio clip in the terminal.
2. The pipeline automatically transcribes the audio.
3. The agent workflow is a simple one that starts at the Assistant agent.
4. The output of the agent is streamed to the audio player.
Try examples like:
- Tell me a joke (will respond with a joke)
- What's the weather in Tokyo? (will call the `get_weather` tool and then speak)
- Hola, como estas? (will handoff to the spanish agent)
"""
@function_tool
def get_weather(city: str) -> str:
    """Get the weather for a given city."""
    # Randomly picked condition — demo stand-in for a real weather lookup.
    print(f"[debug] get_weather called with city: {city}")
    condition = random.choice(["sunny", "cloudy", "rainy", "snowy"])
    return f"The weather in {city} is {condition}."
@function_tool
def get_time() -> str:
    """Get the current time."""
    import datetime

    # 12-hour clock with AM/PM, e.g. "03:07 PM".
    now = datetime.datetime.now().strftime("%I:%M %p")
    print(f"[debug] get_time called, current time: {now}")
    return f"The current time is {now}."
@function_tool
def calculate_tip(bill_amount: float, tip_percentage: float = 15.0) -> str:
    """Calculate tip amount for a bill."""
    # Percentage-of-bill tip; total = bill + tip.
    print(f"[debug] calculate_tip called with bill: ${bill_amount}, tip: {tip_percentage}%")
    tip = bill_amount * (tip_percentage / 100)
    total = bill_amount + tip
    return f"For a bill of ${bill_amount:.2f} with {tip_percentage}% tip, the tip is ${tip:.2f} and total is ${total:.2f}."
spanish_agent = Agent(
name="Spanish",
handoff_description="A spanish speaking agent.",
instructions=prompt_with_handoff_instructions(
"You're speaking to a human, so be polite and concise. Speak in Spanish only. "
"Help with weather, time, and calculations as needed."
),
model="gpt-4o-mini",
tools=[get_weather, get_time, calculate_tip]
)
french_agent = Agent(
name="French",
handoff_description="A french speaking agent.",
instructions=prompt_with_handoff_instructions(
"You're speaking to a human, so be polite and concise. Speak in French only. "
"Help with weather, time, and calculations as needed."
),
model="gpt-4o-mini",
tools=[get_weather, get_time, calculate_tip]
)
agent = Agent(
name="Assistant",
instructions=prompt_with_handoff_instructions(
"""You're speaking to a human, so be polite and concise.
You can help with:
- Weather information for any city
- Current time
- Tip calculations
- General conversation and jokes
Language handling:
- If the user speaks in Spanish, handoff to the Spanish agent
- If the user speaks in French, handoff to the French agent
- Otherwise, respond in English
Keep responses conversational and friendly for voice interaction."""
),
model="gpt-4o-mini",
handoffs=[spanish_agent, french_agent],
tools=[get_weather, get_time, calculate_tip],
)
class WorkflowCallbacks(SingleAgentWorkflowCallbacks):
    """Custom callbacks to monitor the voice workflow."""

    def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:
        """Called when the workflow runs with a new transcription."""
        print(f"[debug] 🎯 Workflow running with transcription: '{transcription}'")

    # NOTE(review): `on_run` is the documented hook; confirm the SDK actually
    # invokes `on_tool_call` / `on_handoff` — they may never fire.
    def on_tool_call(self, tool_name: str, arguments: dict) -> None:
        """Called when a tool is about to be executed."""
        print(f"[debug] 🔧 Tool call: {tool_name} with args: {arguments}")

    def on_handoff(self, from_agent: str, to_agent: str) -> None:
        """Called when a handoff occurs between agents."""
        print(f"[debug] 🔄 Handoff from {from_agent} to {to_agent}")
async def main():
    """Main function to run the static voice agent example.

    One full cycle: record mic audio, run it through the voice pipeline
    (STT -> agent -> TTS), and stream the synthesized reply to the speakers.
    """
    print("🎙️ Static Voice Agent Demo")
    print("=" * 50)
    print()
    # Create the voice pipeline with our agent and callbacks
    pipeline = VoicePipeline(
        workflow=SingleAgentVoiceWorkflow(agent, callbacks=WorkflowCallbacks())
    )
    print("This demo will:")
    print("1. 🎤 Record your voice for a few seconds")
    print("2. 🔄 Transcribe your speech to text")
    print("3. 🤖 Process with AI agent")
    print("4. 🔊 Convert response back to speech")
    print()
    # Record audio input
    try:
        audio_buffer = record_audio(duration=5.0)
        print(f"📊 Recorded {len(audio_buffer)} audio samples")
        # Create audio input for the pipeline
        audio_input = AudioInput(buffer=audio_buffer)
        # Run the voice pipeline
        print("\n🔄 Processing with voice pipeline...")
        result = await pipeline.run(audio_input)
        # Play the result audio
        print("🔊 Playing AI response...")
        with AudioPlayer() as player:
            audio_chunks_received = 0
            lifecycle_events = 0
            async for event in result.stream():
                if event.type == "voice_stream_event_audio":
                    player.add_audio(event.data)
                    audio_chunks_received += 1
                    if audio_chunks_received % 10 == 0:  # Progress indicator
                        print(f"🎵 Received {audio_chunks_received} audio chunks...")
                elif event.type == "voice_stream_event_lifecycle":
                    lifecycle_events += 1
                    print(f"📋 Lifecycle event: {event.event}")
                elif event.type == "voice_stream_event_error":
                    print(f"❌ Error event: {event.error}")
            # Add 1 second of silence to ensure the audio finishes playing
            print("🔇 Adding silence buffer...")
            player.add_audio(np.zeros(24000 * 1, dtype=np.int16))
            print(f"\n✅ Voice interaction complete!")
            print(f"📊 Statistics:")
            print(f" - Audio chunks played: {audio_chunks_received}")
            print(f" - Lifecycle events: {lifecycle_events}")
    except KeyboardInterrupt:
        print("\n⏹️ Demo interrupted by user.")
    except Exception as e:
        # Catch-all so a device/pipeline failure still prints a readable trace.
        print(f"\n❌ Demo failed: {e}")
        import traceback
        traceback.print_exc()
def demo_with_examples():
    """Print the example utterances a user can try against the voice agent."""
    # The final two utterances trigger the Spanish/French handoff agents.
    examples = (
        "Tell me a joke",
        "What's the weather in New York?",
        "What time is it?",
        "Calculate a 20% tip on a $50 bill",
        "Hola, como estas?",
        "Bonjour, comment allez-vous?",
    )
    print("🎭 Demo Examples:")
    for idx, utterance in enumerate(examples, start=1):
        print(f"{idx}. {utterance}")
    print()
    print("You can try saying any of these examples when recording!")
if __name__ == "__main__":
    # Script entry point: list example prompts, then run one record→reply cycle.
    print("🚀 OpenAI Agents SDK - Static Voice Demo")
    print("=" * 60)
    # Show example prompts
    demo_with_examples()
    # Run the main demo
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/agent.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/util.py | import threading
import time
from typing import Optional
import numpy as np
import sounddevice as sd
class AudioPlayer:
    """Real-time audio playback helper built on a sounddevice output stream.

    Intended for use as a context manager: the output stream is opened on
    entry and cleanly shut down on exit.
    """

    def __init__(self, sample_rate: int = 24000, channels: int = 1, dtype=np.int16):
        # Playback format; defaults match 24 kHz mono int16 PCM.
        self.sample_rate = sample_rate
        self.channels = channels
        self.dtype = dtype
        self.stream: Optional[sd.OutputStream] = None
        self._stop_event = threading.Event()

    def __enter__(self):
        """Context manager entry - open and start the output stream."""
        self.stream = sd.OutputStream(
            samplerate=self.sample_rate,
            channels=self.channels,
            dtype=self.dtype,
        )
        self.stream.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - stop and close the stream, if opened."""
        if self.stream:
            self.stream.stop()
            self.stream.close()

    def add_audio(self, audio_data: np.ndarray):
        """Write a chunk of samples for immediate playback."""
        # Ignore audio when no stream is open or playback was stopped.
        if not self.stream or self._stop_event.is_set():
            return
        try:
            self.stream.write(audio_data)
        except Exception as e:
            print(f"[error] Failed to play audio: {e}")

    def stop(self):
        """Signal that no further audio should be played."""
        self._stop_event.set()
def record_audio(
    duration: float = 5.0,
    sample_rate: int = 24000,
    channels: int = 1,
    dtype=np.int16
) -> np.ndarray:
    """
    Record audio from the microphone for a specified duration.
    Args:
        duration: Recording duration in seconds
        sample_rate: Audio sample rate (Hz)
        channels: Number of audio channels
        dtype: Audio data type
    Returns:
        Recorded audio as numpy array (1-D when channels == 1). On Ctrl+C
        the frames captured so far are returned; on any other failure one
        second of silence is returned.
    """
    print(f"🎤 Recording audio for {duration} seconds... Press Ctrl+C to stop early.")
    print("Say something now!")
    start_time = time.monotonic()  # anchor for computing elapsed capture time
    try:
        # Record audio (non-blocking; sd.wait() blocks until the buffer fills)
        recording = sd.rec(
            int(duration * sample_rate),
            samplerate=sample_rate,
            channels=channels,
            dtype=dtype
        )
        sd.wait()
        print("✅ Recording completed!")
        # Convert to 1D array if mono
        if channels == 1:
            recording = recording.flatten()
        return recording.astype(dtype)
    except KeyboardInterrupt:
        print("\n⏹️ Recording stopped by user.")
        sd.stop()
        if 'recording' in locals():
            # BUG FIX: the original sliced by int(time.time() * sample_rate) — an
            # epoch timestamp, not the elapsed time — which always exceeded the
            # buffer length. Slice by the actual elapsed recording time instead.
            captured = min(len(recording), int((time.monotonic() - start_time) * sample_rate))
            partial = recording[:captured]
            # Keep mono output 1-D, matching the normal completion path.
            if channels == 1:
                partial = partial.flatten()
            return partial.astype(dtype)
        # Return empty array if no recording was captured
        return np.zeros(sample_rate, dtype=dtype)
    except Exception as e:
        print(f"❌ Recording failed: {e}")
        return np.zeros(sample_rate, dtype=dtype)
def create_silence(duration: float = 1.0, sample_rate: int = 24000, dtype=np.int16) -> np.ndarray:
    """Return a zero-filled buffer representing ``duration`` seconds of silence.

    Args:
        duration: Duration of silence in seconds
        sample_rate: Audio sample rate (Hz)
        dtype: Audio data type

    Returns:
        Silence buffer as numpy array
    """
    n_samples = int(duration * sample_rate)
    return np.zeros(n_samples, dtype=dtype)
def save_audio(audio_data: np.ndarray, filename: str, sample_rate: int = 24000):
    """
    Save audio data to a WAV file.
    Args:
        audio_data: Audio data as numpy array
        filename: Output filename (should end with .wav)
        sample_rate: Audio sample rate (Hz)
    """
    try:
        import soundfile as sf  # optional dependency, imported lazily
        sf.write(filename, audio_data, sample_rate)
        # BUG FIX: the success message printed the literal "(unknown)"
        # instead of the destination path.
        print(f"✅ Audio saved to {filename}")
    except ImportError:
        print("❌ soundfile package required for saving audio. Install with: pip install soundfile")
    except Exception as e:
        print(f"❌ Failed to save audio: {e}")
def load_audio(filename: str, sample_rate: int = 24000, dtype=np.int16) -> np.ndarray:
    """Load audio from a WAV file, resampling to ``sample_rate`` when needed.

    Args:
        filename: Input filename
        sample_rate: Target sample rate (will resample if different)
        dtype: Target data type

    Returns:
        Audio data as numpy array; one second of silence on any failure.
    """
    try:
        import soundfile as sf
        samples, source_rate = sf.read(filename)
        # Resample only when the file's rate differs from the target.
        if source_rate != sample_rate:
            import librosa
            samples = librosa.resample(samples, orig_sr=source_rate, target_sr=sample_rate)
        # soundfile yields float data; rescale when int16 output was requested.
        if dtype == np.int16:
            samples = (samples * 32767).astype(np.int16)
        return samples
    except ImportError:
        print("❌ soundfile and librosa packages required for loading audio.")
        print("Install with: pip install soundfile librosa")
        return np.zeros(sample_rate, dtype=dtype)
    except Exception as e:
        print(f"❌ Failed to load audio: {e}")
        return np.zeros(sample_rate, dtype=dtype)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/util.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/agent.py | import asyncio
import random
import threading
import time
import numpy as np
from agents import Agent, function_tool
from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
from agents.voice import (
StreamedAudioInput,
SingleAgentVoiceWorkflow,
SingleAgentWorkflowCallbacks,
VoicePipeline,
)
from .util import AudioPlayer, StreamedAudioRecorder, create_silence
"""
This is a streaming voice example that processes audio in real-time. Run it via:
`python -m ai_agent_framework_crash_course.openai_sdk_crash_course.11_voice.streamed.agent`
1. The pipeline continuously listens for audio input.
2. It automatically detects when you start and stop speaking.
3. The agent workflow processes speech in real-time.
4. The output is streamed back to you as audio.
This example demonstrates:
- Real-time speech detection and processing
- Streaming audio input and output
- Activity detection for turn-based conversation
- Interruption handling and turn management
Try examples like:
- Start speaking and the agent will respond when you finish
- Try multiple turns of conversation
- Test language handoffs with Spanish or French
"""
@function_tool
def get_weather(city: str) -> str:
    """Get the weather for a given city."""
    print(f"[debug] get_weather called with city: {city}")
    conditions = ["sunny", "cloudy", "rainy", "snowy"]
    condition = random.choice(conditions)
    return f"The weather in {city} is {condition}."
@function_tool
def get_time() -> str:
    """Get the current time."""
    import datetime
    now = datetime.datetime.now().strftime("%I:%M %p")
    print(f"[debug] get_time called, current time: {now}")
    return f"The current time is {now}."
@function_tool
def set_reminder(message: str, minutes: int = 5) -> str:
    """Set a simple reminder (demo function)."""
    print(f"[debug] set_reminder called: '{message}' in {minutes} minutes")
    confirmation = f"Reminder set: '{message}' in {minutes} minutes. (This is a demo - no actual reminder will be triggered)"
    return confirmation
@function_tool
def get_news_summary() -> str:
    """Get a brief news summary (demo function)."""
    print("[debug] get_news_summary called")
    # Canned headlines; one is picked at random per call.
    headlines = (
        "Technology stocks continue to rise amid AI developments",
        "Climate change summit reaches new international agreements",
        "Space exploration mission launches successfully",
        "New renewable energy projects announced globally",
    )
    return f"Here's a news update: {random.choice(headlines)}. This is a demo news summary."
# Specialist agent: responds only in Spanish; the root agent hands off to it.
spanish_agent = Agent(
    name="Spanish",
    handoff_description="A spanish speaking agent.",
    instructions=prompt_with_handoff_instructions(
        "You're speaking to a human in real-time, so be polite and concise. Speak in Spanish only. "
        "Help with weather, time, reminders, and news as needed. Keep responses brief for voice interaction."
    ),
    model="gpt-4o-mini",
    tools=[get_weather, get_time, set_reminder, get_news_summary]
)
# Specialist agent: responds only in French.
french_agent = Agent(
    name="French",
    handoff_description="A french speaking agent.",
    instructions=prompt_with_handoff_instructions(
        "You're speaking to a human in real-time, so be polite and concise. Speak in French only. "
        "Help with weather, time, reminders, and news as needed. Keep responses brief for voice interaction."
    ),
    model="gpt-4o-mini",
    tools=[get_weather, get_time, set_reminder, get_news_summary]
)
# Root triage agent: answers in English and delegates to the language
# specialists above via handoffs; all three share the same tool set.
agent = Agent(
    name="Assistant",
    instructions=prompt_with_handoff_instructions(
        """You're speaking to a human in real-time voice conversation, so be polite and concise.
You can help with:
- Weather information for any city
- Current time
- Setting reminders (demo)
- News summaries (demo)
- General conversation
Language handling:
- If the user speaks in Spanish, handoff to the Spanish agent
- If the user speaks in French, handoff to the French agent
- Otherwise, respond in English
Keep responses brief and conversational since this is a voice interface.
Acknowledge when users switch topics or ask follow-up questions."""
    ),
    model="gpt-4o-mini",
    handoffs=[spanish_agent, french_agent],
    tools=[get_weather, get_time, set_reminder, get_news_summary],
)
class StreamingWorkflowCallbacks(SingleAgentWorkflowCallbacks):
    """Custom callbacks to monitor the streaming voice workflow."""
    def __init__(self):
        # Incremented once per transcription in on_run.
        self.turn_count = 0
        # Session start, used to report elapsed time in on_turn_start.
        self.start_time = time.time()
    def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:
        """Called when the workflow runs with a new transcription."""
        self.turn_count += 1
        print(f"\n[debug] 🎯 Turn {self.turn_count} - Transcription: '{transcription}'")
    # NOTE(review): only on_run is part of SingleAgentWorkflowCallbacks in the
    # voice SDK; the hooks below are presumably never invoked by the pipeline
    # unless the SDK version defines them — confirm against the agents SDK.
    def on_tool_call(self, tool_name: str, arguments: dict) -> None:
        """Called when a tool is about to be executed."""
        print(f"[debug] 🔧 Tool call: {tool_name} with args: {arguments}")
    def on_handoff(self, from_agent: str, to_agent: str) -> None:
        """Called when a handoff occurs between agents."""
        print(f"[debug] 🔄 Handoff from {from_agent} to {to_agent}")
    def on_turn_start(self) -> None:
        """Called when a new turn starts."""
        elapsed = time.time() - self.start_time
        print(f"[debug] ▶️ Turn started (session time: {elapsed:.1f}s)")
    def on_turn_end(self) -> None:
        """Called when a turn ends."""
        print(f"[debug] ⏹️ Turn ended")
class VoiceSessionManager:
    """Manages the voice session state and audio streams."""
    def __init__(self):
        # Polled by the async input loop; cleared by stop_session().
        self.is_running = False
        # Bound to the live AudioPlayer only while _run_streaming_session runs.
        self.audio_player = None
        self.pipeline = None
        self.callbacks = StreamingWorkflowCallbacks()
        # Cooperative shutdown signal shared by the input/output coroutines.
        self._stop_event = threading.Event()
    async def start_session(self):
        """Start the voice session."""
        self.is_running = True
        self._stop_event.clear()
        # Create the voice pipeline
        self.pipeline = VoicePipeline(
            workflow=SingleAgentVoiceWorkflow(agent, callbacks=self.callbacks)
        )
        print("🎙️ Voice session started. Start speaking...")
        print("💡 Tips:")
        print("   - Speak clearly and pause between sentences")
        print("   - Try asking about weather, time, or setting reminders")
        print("   - Say something in Spanish or French to test language handoffs")
        print("   - Press Ctrl+C to end the session")
        print()
        # Start audio recording and processing
        await self._run_streaming_session()
    async def _run_streaming_session(self):
        """Run the main streaming session loop."""
        with StreamedAudioRecorder() as recorder:
            with AudioPlayer() as player:
                self.audio_player = player
                # Create streamed audio input
                streamed_input = StreamedAudioInput()
                # Start the pipeline processing
                # NOTE(review): pipeline.run() is awaited before any audio is
                # pushed — presumably it returns a streaming result object
                # immediately; confirm against the agents SDK voice docs.
                result = await self.pipeline.run(streamed_input)
                # Create tasks for audio input and output processing
                input_task = asyncio.create_task(self._process_audio_input(recorder, streamed_input))
                output_task = asyncio.create_task(self._process_audio_output(result))
                try:
                    # Run both tasks concurrently
                    await asyncio.gather(input_task, output_task)
                except asyncio.CancelledError:
                    print("\n🛑 Session cancelled")
                finally:
                    # Cleanup
                    streamed_input.finish()
                    self.is_running = False
    async def _process_audio_input(self, recorder: StreamedAudioRecorder, streamed_input: StreamedAudioInput):
        """Process incoming audio from the microphone."""
        print("🎤 Listening for audio input...")
        while self.is_running and not self._stop_event.is_set():
            # Drain at most one buffered chunk per iteration, then yield.
            if recorder.has_audio():
                audio_chunk = recorder.get_audio_chunk()
                if audio_chunk is not None:
                    # Push audio to the pipeline
                    streamed_input.push_audio(audio_chunk)
            # Small delay to prevent busy waiting
            await asyncio.sleep(0.01)
        print("⏹️ Audio input processing stopped")
    async def _process_audio_output(self, result):
        """Process outgoing audio to the speakers."""
        print("🔊 Ready to play audio responses...")
        audio_chunks_count = 0
        async for event in result.stream():
            if self._stop_event.is_set():
                break
            if event.type == "voice_stream_event_audio":
                if self.audio_player:
                    self.audio_player.add_audio(event.data)
                    audio_chunks_count += 1
                    # Progress indicator for long responses
                    if audio_chunks_count % 20 == 0:
                        print(f"🎵 Playing response... ({audio_chunks_count} chunks)")
            elif event.type == "voice_stream_event_lifecycle":
                if event.event == "turn_started":
                    print("🔄 AI is processing your speech...")
                elif event.event == "turn_ended":
                    print("✅ AI response complete. You can speak again.")
                    # Add a small silence buffer between turns
                    if self.audio_player:
                        self.audio_player.add_audio(create_silence(0.5))
            elif event.type == "voice_stream_event_error":
                print(f"❌ Voice error: {event.error}")
        print("⏹️ Audio output processing stopped")
    def stop_session(self):
        """Stop the voice session."""
        self.is_running = False
        self._stop_event.set()
        print("\n🛑 Stopping voice session...")
async def main():
    """Entry point: run a voice session until it finishes or is interrupted."""
    print("🎙️ Streaming Voice Agent Demo")
    print("=" * 50)
    print()
    manager = VoiceSessionManager()
    try:
        await manager.start_session()
    except KeyboardInterrupt:
        # Ctrl+C: shut the session down cleanly.
        print("\n⏹️ Demo interrupted by user.")
        manager.stop_session()
    except Exception as exc:
        print(f"\n❌ Demo failed: {exc}")
        import traceback
        traceback.print_exc()
    finally:
        print("\n👋 Voice session ended. Thanks for trying the demo!")
def show_streaming_features():
    """Print an overview of the streaming demo's features and sample commands."""
    banner = [
        "🌊 Streaming Voice Features:",
        "=" * 40,
        "",
        "✨ Real-time Features:",
        "  • Continuous audio input processing",
        "  • Automatic speech activity detection",
        "  • Real-time agent response streaming",
        "  • Turn-based conversation management",
        "",
        "🔧 Advanced Capabilities:",
        "  • Multi-language support with agent handoffs",
        "  • Tool calling during voice conversation",
        "  • Streaming callbacks for monitoring",
        "  • Interruption handling (via lifecycle events)",
        "",
        "🎯 Try These Commands:",
        "  • 'What's the weather in Paris?'",
        "  • 'What time is it?'",
        "  • 'Set a reminder to call mom in 10 minutes'",
        "  • 'Give me a news summary'",
        "  • 'Hola, ¿cómo estás?' (Spanish)",
        "  • 'Bonjour, comment ça va?' (French)",
        "",
    ]
    for line in banner:
        print(line)
# Script entry point: print the feature overview, then start the
# real-time voice session.
if __name__ == "__main__":
    print("🚀 OpenAI Agents SDK - Streaming Voice Demo")
    print("=" * 60)
    # Show streaming features
    show_streaming_features()
    # Run the main demo
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/agent.py",
"license": "Apache License 2.0",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/util.py | import threading
import time
from typing import Optional
import numpy as np
import sounddevice as sd
class AudioPlayer:
    """A simple audio player using sounddevice for real-time audio playback."""
    def __init__(self, sample_rate: int = 24000, channels: int = 1, dtype=np.int16):
        self.sample_rate = sample_rate
        self.channels = channels
        self.dtype = dtype
        # Opened lazily in __enter__; None until then.
        self.stream: Optional[sd.OutputStream] = None
        # Set by stop(); add_audio() becomes a no-op afterwards.
        self._stop_event = threading.Event()
    def __enter__(self):
        """Context manager entry - start the audio stream."""
        self.stream = sd.OutputStream(
            samplerate=self.sample_rate,
            channels=self.channels,
            dtype=self.dtype
        )
        self.stream.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - stop and close the audio stream."""
        if self.stream:
            self.stream.stop()
            self.stream.close()
    def add_audio(self, audio_data: np.ndarray):
        """Add audio data to be played immediately."""
        if self.stream and not self._stop_event.is_set():
            try:
                self.stream.write(audio_data)
            except Exception as e:
                # Errors are printed, never raised, so one bad buffer
                # does not kill the whole session.
                print(f"[error] Failed to play audio: {e}")
    def stop(self):
        """Stop the audio player."""
        self._stop_event.set()
class StreamedAudioRecorder:
    """Continuously captures microphone audio into a thread-safe chunk queue.

    Use as a context manager. sounddevice invokes ``_audio_callback`` on its
    own thread, so all queue access is guarded by a lock.
    """
    def __init__(self, sample_rate: int = 24000, channels: int = 1, dtype=np.int16, chunk_size: int = 1024):
        self.sample_rate = sample_rate
        self.channels = channels
        self.dtype = dtype
        self.chunk_size = chunk_size
        self.stream: Optional[sd.InputStream] = None
        self._audio_queue = []
        self._stop_event = threading.Event()
        self._lock = threading.Lock()
    def __enter__(self):
        """Open and start the input stream with our callback attached."""
        mic = sd.InputStream(
            samplerate=self.sample_rate,
            channels=self.channels,
            dtype=self.dtype,
            blocksize=self.chunk_size,
            callback=self._audio_callback
        )
        mic.start()
        self.stream = mic
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Signal shutdown, then stop and close the input stream."""
        self._stop_event.set()
        if self.stream is not None:
            self.stream.stop()
            self.stream.close()
    def _audio_callback(self, indata, frames, time, status):
        """sounddevice callback: buffer each incoming block as a flat chunk."""
        if status:
            print(f"[warning] Audio input status: {status}")
        chunk = indata.copy().flatten().astype(self.dtype)
        with self._lock:
            self._audio_queue.append(chunk)
    def get_audio_chunk(self) -> Optional[np.ndarray]:
        """Pop and return the oldest buffered chunk, or None when empty."""
        with self._lock:
            if not self._audio_queue:
                return None
            return self._audio_queue.pop(0)
    def has_audio(self) -> bool:
        """Return True when at least one chunk is buffered."""
        with self._lock:
            return bool(self._audio_queue)
    def stop(self):
        """Signal the recorder to stop."""
        self._stop_event.set()
def record_audio(
    duration: float = 5.0,
    sample_rate: int = 24000,
    channels: int = 1,
    dtype=np.int16
) -> np.ndarray:
    """
    Record audio from the microphone for a specified duration.
    Args:
        duration: Recording duration in seconds
        sample_rate: Audio sample rate (Hz)
        channels: Number of audio channels
        dtype: Audio data type
    Returns:
        Recorded audio as numpy array (1-D when channels == 1). On Ctrl+C
        the frames captured so far are returned; on any other failure one
        second of silence is returned.
    """
    print(f"🎤 Recording audio for {duration} seconds... Press Ctrl+C to stop early.")
    print("Say something now!")
    start_time = time.monotonic()  # anchor for computing elapsed capture time
    try:
        # Record audio (non-blocking; sd.wait() blocks until the buffer fills)
        recording = sd.rec(
            int(duration * sample_rate),
            samplerate=sample_rate,
            channels=channels,
            dtype=dtype
        )
        sd.wait()
        print("✅ Recording completed!")
        # Convert to 1D array if mono
        if channels == 1:
            recording = recording.flatten()
        return recording.astype(dtype)
    except KeyboardInterrupt:
        print("\n⏹️ Recording stopped by user.")
        sd.stop()
        if 'recording' in locals():
            # BUG FIX: the original sliced by int(time.time() * sample_rate) — an
            # epoch timestamp, not the elapsed time — which always exceeded the
            # buffer length. Slice by the actual elapsed recording time instead.
            captured = min(len(recording), int((time.monotonic() - start_time) * sample_rate))
            partial = recording[:captured]
            # Keep mono output 1-D, matching the normal completion path.
            if channels == 1:
                partial = partial.flatten()
            return partial.astype(dtype)
        # Return empty array if no recording was captured
        return np.zeros(sample_rate, dtype=dtype)
    except Exception as e:
        print(f"❌ Recording failed: {e}")
        return np.zeros(sample_rate, dtype=dtype)
def create_silence(duration: float = 1.0, sample_rate: int = 24000, dtype=np.int16) -> np.ndarray:
    """Return a zero-filled buffer representing ``duration`` seconds of silence.

    Args:
        duration: Duration of silence in seconds
        sample_rate: Audio sample rate (Hz)
        dtype: Audio data type

    Returns:
        Silence buffer as numpy array
    """
    frame_count = int(duration * sample_rate)
    return np.zeros(frame_count, dtype=dtype)
def save_audio(audio_data: np.ndarray, filename: str, sample_rate: int = 24000):
    """
    Save audio data to a WAV file.
    Args:
        audio_data: Audio data as numpy array
        filename: Output filename (should end with .wav)
        sample_rate: Audio sample rate (Hz)
    """
    try:
        import soundfile as sf  # optional dependency, imported lazily
        sf.write(filename, audio_data, sample_rate)
        # BUG FIX: the success message printed the literal "(unknown)"
        # instead of the destination path.
        print(f"✅ Audio saved to {filename}")
    except ImportError:
        print("❌ soundfile package required for saving audio. Install with: pip install soundfile")
    except Exception as e:
        print(f"❌ Failed to save audio: {e}")
def load_audio(filename: str, sample_rate: int = 24000, dtype=np.int16) -> np.ndarray:
    """Load audio from a WAV file, resampling to ``sample_rate`` when needed.

    Args:
        filename: Input filename
        sample_rate: Target sample rate (will resample if different)
        dtype: Target data type

    Returns:
        Audio data as numpy array; one second of silence on any failure.
    """
    try:
        import soundfile as sf
        samples, source_rate = sf.read(filename)
        # Resample only when the file's rate differs from the target.
        if source_rate != sample_rate:
            import librosa
            samples = librosa.resample(samples, orig_sr=source_rate, target_sr=sample_rate)
        # soundfile yields float data; rescale when int16 output was requested.
        if dtype == np.int16:
            samples = (samples * 32767).astype(np.int16)
        return samples
    except ImportError:
        print("❌ soundfile and librosa packages required for loading audio.")
        print("Install with: pip install soundfile librosa")
        return np.zeros(sample_rate, dtype=dtype)
    except Exception as e:
        print(f"❌ Failed to load audio: {e}")
        return np.zeros(sample_rate, dtype=dtype)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/util.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/agent.py | from agents import Agent, Runner
import asyncio
# Create an agent for demonstrating different execution methods
root_agent = Agent(
name="Personal Assistant Agent",
instructions="""
You are a helpful personal assistant.
Your role is to:
1. Answer questions clearly and concisely
2. Provide helpful information and advice
3. Be friendly and professional
4. Offer practical solutions to problems
When users ask questions:
- Give accurate and helpful responses
- Explain complex topics in simple terms
- Offer follow-up suggestions when appropriate
- Maintain a positive and supportive tone
Keep responses concise but informative.
"""
)
# Example usage patterns
def sync_example():
    """Run the agent synchronously and return its final text output."""
    run_result = Runner.run_sync(root_agent, "Hello, how does sync execution work?")
    return run_result.final_output
async def async_example():
    """Run the agent asynchronously and return its final text output."""
    run_result = await Runner.run(root_agent, "Hello, how does async execution work?")
    return run_result.final_output
async def streaming_example():
    """Streaming execution example"""
    # NOTE(review): recent openai-agents SDK versions return a RunResultStreaming
    # from Runner.run_streamed(); events are consumed via result.stream_events()
    # and text deltas typically live on event.data rather than event.content.
    # Confirm this loop actually accumulates text with the pinned SDK version.
    response_text = ""
    async for event in Runner.run_streamed(root_agent, "Tell me about streaming execution"):
        if hasattr(event, 'content') and event.content:
            response_text += event.content
    return response_text
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/agent.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/app.py | """
Streamlit Web Interface for Tutorial 1: Your First Agent
This provides an interactive web interface to test the personal assistant agent
with different execution methods.
"""
import os
import asyncio
import streamlit as st
from dotenv import load_dotenv
from agents import Agent, Runner
# Load environment variables
load_dotenv()
# Page configuration
st.set_page_config(
page_title="Personal Assistant Agent",
page_icon="🎯",
layout="wide"
)
# Title and description
st.title("🎯 Personal Assistant Agent")
st.markdown("**Tutorial 1**: Your first OpenAI agent with different execution methods")
# Check API key
if not os.getenv("OPENAI_API_KEY"):
st.error("❌ OPENAI_API_KEY not found. Please create a .env file with your OpenAI API key.")
st.stop()
# Create the agent
@st.cache_resource
def create_agent():
    """Build the personal-assistant Agent once; Streamlit caches it across reruns."""
    assistant = Agent(
        name="Personal Assistant",
        instructions="""
        You are a helpful personal assistant.
        Your role is to:
        1. Answer questions clearly and concisely
        2. Provide helpful information and advice
        3. Be friendly and professional
        4. Offer practical solutions to problems
        When users ask questions:
        - Give accurate and helpful responses
        - Explain complex topics in simple terms
        - Offer follow-up suggestions when appropriate
        - Maintain a positive and supportive tone
        Keep responses concise but informative.
        """
    )
    return assistant
agent = create_agent()
# Sidebar with execution method selection
st.sidebar.title("Execution Methods")
execution_method = st.sidebar.selectbox(
"Choose execution method:",
["Synchronous", "Asynchronous", "Streaming"]
)
st.sidebar.markdown("---")
st.sidebar.markdown("### About Execution Methods")
if execution_method == "Synchronous":
st.sidebar.info("**Synchronous**: Blocks until response is complete. Simple and straightforward.")
elif execution_method == "Asynchronous":
st.sidebar.info("**Asynchronous**: Non-blocking execution. Good for concurrent operations.")
else:
st.sidebar.info("**Streaming**: Real-time response streaming. Great for long responses.")
# Main chat interface
st.markdown("### Chat Interface")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Chat input
# Main chat turn: runs once per rerun when the user submits a prompt.
if prompt := st.chat_input("Ask your personal assistant anything..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # Generate assistant response
    with st.chat_message("assistant"):
        try:
            if execution_method == "Synchronous":
                with st.spinner("Thinking..."):
                    result = Runner.run_sync(agent, prompt)
                    response = result.final_output
                    st.markdown(response)
            elif execution_method == "Asynchronous":
                # asyncio.run is safe here: Streamlit's script thread has no
                # running event loop of its own.
                with st.spinner("Processing asynchronously..."):
                    async def get_async_response():
                        result = await Runner.run(agent, prompt)
                        return result.final_output
                    response = asyncio.run(get_async_response())
                    st.markdown(response)
            else: # Streaming
                response_placeholder = st.empty()
                response_text = ""
                # NOTE(review): Runner.run_streamed typically returns a
                # RunResultStreaming consumed via .stream_events(), with deltas
                # on event.data rather than event.content — confirm this loop
                # produces output with the pinned SDK version.
                async def stream_response():
                    full_response = ""
                    async for event in Runner.run_streamed(agent, prompt):
                        if hasattr(event, 'content') and event.content:
                            full_response += event.content
                            response_placeholder.markdown(full_response + "▌")
                    response_placeholder.markdown(full_response)
                    return full_response
                response = asyncio.run(stream_response())
            # Add assistant response to chat history
            st.session_state.messages.append({"role": "assistant", "content": response})
        except Exception as e:
            # Surface the failure in the chat itself so the transcript stays coherent.
            error_msg = f"❌ Error: {str(e)}"
            st.error(error_msg)
            st.session_state.messages.append({"role": "assistant", "content": error_msg})
# Clear chat button
if st.sidebar.button("Clear Chat History"):
st.session_state.messages = []
st.rerun()
# Example prompts
st.sidebar.markdown("---")
st.sidebar.markdown("### Example Prompts")
example_prompts = [
"What are 3 productivity tips for remote work?",
"Explain quantum computing in simple terms",
"Write a short poem about technology",
"How can I improve my focus and concentration?",
"What's the difference between AI and machine learning?"
]
for prompt in example_prompts:
if st.sidebar.button(prompt, key=f"example_{prompt[:20]}"):
# Add the example prompt to chat
st.session_state.messages.append({"role": "user", "content": prompt})
st.rerun()
# Footer with tutorial information
st.markdown("---")
st.markdown("""
### 📚 Tutorial Information
This is **Tutorial 1** of the OpenAI Agents SDK crash course. You're learning:
- ✅ Basic agent creation with the Agent class
- ✅ Different execution methods (sync, async, streaming)
- ✅ Agent configuration with instructions
- ✅ Interactive web interfaces with Streamlit
**Next**: Try [Tutorial 2: Structured Output Agent](../2_structured_output_agent/) to learn about type-safe responses.
""")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/app.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_1_support_ticket_agent/agent.py | from typing import List, Optional
from enum import Enum
from agents import Agent
from pydantic import BaseModel, Field
class Priority(str, Enum):
    """Ticket urgency level; string-valued so it serializes cleanly to JSON."""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
class SupportTicket(BaseModel):
    """Structured support ticket the agent is constrained to emit (via output_type)."""
    title: str = Field(description="A concise summary of the issue")
    description: str = Field(description="Detailed description of the problem")
    priority: Priority = Field(description="The ticket priority level")
    # Free-text department label; the agent prompt suggests the allowed values.
    category: str = Field(description="The department this ticket belongs to")
    # Optional: only meaningful for technical issues.
    steps_to_reproduce: Optional[List[str]] = Field(
        description="Steps to reproduce the issue (for technical problems)",
        default=None
    )
    # Free-text estimate; the schema does not constrain its format.
    estimated_resolution_time: str = Field(
        description="Estimated time to resolve this issue"
    )
root_agent = Agent(
name="Support Ticket Creator",
instructions="""
You are a support ticket creation assistant that converts customer complaints
into well-structured support tickets.
Based on customer descriptions, create structured support tickets with:
- Clear, concise titles
- Detailed problem descriptions
- Appropriate priority levels (low/medium/high/critical)
- Correct categories (technical/billing/account/product/general)
- Steps to reproduce for technical issues
- Realistic resolution time estimates
IMPORTANT: Response must be valid JSON matching the SupportTicket schema.
""",
output_type=SupportTicket
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_1_support_ticket_agent/agent.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_2_product_review_agent/agent.py | from typing import List, Optional
from enum import Enum
from agents import Agent
from pydantic import BaseModel, Field
class Sentiment(str, Enum):
    """Five-level review sentiment scale; string values appear in the JSON output."""
    VERY_POSITIVE = "very_positive"
    POSITIVE = "positive"
    NEUTRAL = "neutral"
    NEGATIVE = "negative"
    VERY_NEGATIVE = "very_negative"
class ProductReview(BaseModel):
    """Structured analysis of a single customer product review (agent output_type)."""
    product_name: Optional[str] = Field(description="Product name if mentioned", default=None)
    # Constrained to 1-5 inclusive by pydantic's ge/le validators.
    rating: int = Field(description="Star rating (1-5)", ge=1, le=5)
    sentiment: Sentiment = Field(description="Overall sentiment of the review")
    # default_factory=list instead of default=[]: the documented pydantic idiom
    # for a fresh-per-instance default, avoiding a shared mutable literal in the
    # field declaration. Schema and behavior are unchanged.
    main_positives: List[str] = Field(description="Main positive points mentioned", default_factory=list)
    main_negatives: List[str] = Field(description="Main negative points mentioned", default_factory=list)
    would_recommend: Optional[bool] = Field(description="Whether reviewer would recommend", default=None)
    summary: str = Field(description="Brief summary of the review")
root_agent = Agent(
name="Product Review Analyzer",
instructions="""
You are a product review analysis expert that extracts structured data
from customer product reviews.
Analyze the review text and extract:
- Product name if mentioned
- Star rating (1-5) based on review tone
- Sentiment classification (very_positive to very_negative)
- Main positive and negative points
- Whether they would recommend (if stated or implied)
- Brief summary
RATING GUIDELINES:
- 5 stars: Excellent, highly satisfied, "amazing", "perfect"
- 4 stars: Good, satisfied, minor issues
- 3 stars: Okay, mixed feelings, "decent"
- 2 stars: Poor, unsatisfied, significant issues
- 1 star: Terrible, very unsatisfied, "worst"
IMPORTANT: Response must be valid JSON matching the ProductReview schema.
""",
output_type=ProductReview
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_2_product_review_agent/agent.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/product_review_agent.py | """
OpenAI Agents SDK Tutorial 2: Structured Output Agent - Product Reviews
This module demonstrates extracting structured data from product reviews
using complex nested Pydantic models.
"""
import os
from typing import List, Optional
from enum import Enum
from dotenv import load_dotenv
from pydantic import BaseModel, Field, validator
from agents import Agent, Runner
# Load environment variables
load_dotenv()
class Sentiment(str, Enum):
    """Review sentiment classification"""
    # String-valued members so the enum serializes directly to JSON.
    VERY_POSITIVE = "very_positive"
    POSITIVE = "positive"
    NEUTRAL = "neutral"
    NEGATIVE = "negative"
    VERY_NEGATIVE = "very_negative"
class ProductCategory(str, Enum):
    """Product category classification"""
    ELECTRONICS = "electronics"
    CLOTHING = "clothing"
    HOME = "home"
    BOOKS = "books"
    FOOD = "food"
    BEAUTY = "beauty"
    SPORTS = "sports"
    AUTOMOTIVE = "automotive"
    # Catch-all for anything that does not fit the categories above.
    OTHER = "other"
class ProductInfo(BaseModel):
    """Product identification details extracted from the review text.

    Optional fields stay ``None`` when the review does not mention them;
    ``category`` is always set (per its Field description it may be inferred).
    """
    name: Optional[str] = Field(description="Product name if mentioned", default=None)
    category: ProductCategory = Field(description="Inferred product category")
    brand: Optional[str] = Field(description="Brand name if mentioned", default=None)
    price_mentioned: Optional[str] = Field(description="Price if mentioned in review", default=None)
class ReviewMetrics(BaseModel):
    """Quantitative review metrics; range constraints are enforced by pydantic (ge/le)."""
    rating: int = Field(description="Star rating (1-5)", ge=1, le=5)
    sentiment: Sentiment = Field(description="Overall sentiment of the review")
    confidence_score: float = Field(description="Confidence in sentiment analysis (0-1)", ge=0, le=1)
    word_count: int = Field(description="Approximate word count of review", ge=0)
class ReviewAspects(BaseModel):
    """Specific aspects mentioned in the review; ``None`` means "not mentioned"."""
    quality: Optional[str] = Field(description="Quality assessment if mentioned", default=None)
    value_for_money: Optional[str] = Field(description="Value assessment if mentioned", default=None)
    shipping: Optional[str] = Field(description="Shipping experience if mentioned", default=None)
    customer_service: Optional[str] = Field(description="Customer service experience if mentioned", default=None)
    ease_of_use: Optional[str] = Field(description="Usability assessment if mentioned", default=None)
class ProductReview(BaseModel):
    """Complete structured product review analysis.

    Aggregates the nested extraction models (product info, metrics, aspects)
    plus free-form insights and a summary.
    """
    product_info: ProductInfo
    metrics: ReviewMetrics
    aspects: ReviewAspects
    # Key insights — use default_factory so each instance gets its own list
    # instead of a single shared mutable default.
    main_positives: List[str] = Field(
        description="Main positive points mentioned", default_factory=list
    )
    main_negatives: List[str] = Field(
        description="Main negative points mentioned", default_factory=list
    )
    would_recommend: Optional[bool] = Field(description="Whether reviewer would recommend", default=None)
    # Summary
    summary: str = Field(description="Brief summary of the review")
    key_phrases: List[str] = Field(
        description="Important phrases from the review", default_factory=list
    )
    # NOTE(review): pydantic v1-style validator (matches this file's
    # `from pydantic import validator` import); migrate to field_validator
    # if the project moves to pydantic v2.
    @validator('key_phrases')
    def limit_key_phrases(cls, v):
        """Keep at most the first 5 key phrases."""
        return v[:5]
# Create the product review agent
product_review_agent = Agent(
name="Product Review Analyzer",
instructions="""
You are a product review analysis expert that extracts structured data
from customer product reviews.
Analyze the review text and extract:
PRODUCT INFO:
- Product name, brand, category, and price if mentioned
- Infer category from context if not explicitly stated
REVIEW METRICS:
- Star rating (1-5) based on review tone
- Sentiment classification (very_positive to very_negative)
- Confidence score for sentiment analysis
- Approximate word count
REVIEW ASPECTS:
- Quality, value for money, shipping, customer service, ease of use
- Only include aspects that are actually mentioned
KEY INSIGHTS:
- Main positive and negative points
- Whether they would recommend (if stated or implied)
- Brief summary and key phrases
RATING GUIDELINES:
- 5 stars: Excellent, highly satisfied, "amazing", "perfect"
- 4 stars: Good, satisfied, minor issues
- 3 stars: Okay, mixed feelings, "decent"
- 2 stars: Poor, unsatisfied, significant issues
- 1 star: Terrible, very unsatisfied, "worst"
SENTIMENT GUIDELINES:
- very_positive: Extremely enthusiastic, highly recommended
- positive: Generally satisfied, good experience
- neutral: Mixed or balanced opinion
- negative: Generally unsatisfied, disappointed
- very_negative: Extremely dissatisfied, angry
Always return a valid JSON object matching the ProductReview schema.
""",
output_type=ProductReview
)
def demonstrate_review_analysis():
    """Run the review agent over four canned reviews and pretty-print results.

    Console demo only: each test case goes through Runner.run_sync and the
    fields of the resulting ProductReview are printed section by section.
    Failures are reported per-case and do not stop the loop.
    """
    print("🎯 OpenAI Agents SDK - Tutorial 2: Product Review Agent")
    print("=" * 60)
    print()
    # Test cases with different types of reviews (positive / mixed / negative / neutral)
    test_reviews = [
        {
            "title": "Positive Electronics Review",
            "review": "This MacBook Pro M2 is absolutely incredible! The battery life lasts all day, the screen is gorgeous, and it's lightning fast. Worth every penny of the $2,499 I paid. Apple really knocked it out of the park. The build quality is premium and it handles video editing like a dream. Highly recommend to any creative professional!"
        },
        {
            "title": "Mixed Clothing Review",
            "review": "The Nike running shoes are decent for the price ($120). Comfortable for short runs but the sizing runs a bit small. Quality seems okay but not amazing. Shipping was fast though, arrived in 2 days. Customer service was helpful when I had questions. Would maybe recommend if you size up."
        },
        {
            "title": "Negative Food Review",
            "review": "Terrible experience with this organic coffee subscription. The beans taste stale and bitter, nothing like the description. Customer service ignored my complaints for weeks. Way overpriced at $35/month for this quality. Save your money and buy local. Will not be ordering again."
        },
        {
            "title": "Neutral Home Product Review",
            "review": "The IKEA desk lamp does its job. Easy to assemble and decent lighting for work. Not the brightest but sufficient. Build quality is what you'd expect for $25. The cord could be longer. It's an okay purchase, nothing special but functional."
        }
    ]
    for i, test_case in enumerate(test_reviews, 1):
        print(f"=== Review Analysis {i}: {test_case['title']} ===")
        print("Original Review:")
        print(f'"{test_case["review"]}"')
        print()
        try:
            # Analyze the review (blocking; final_output is a ProductReview
            # because the agent declares output_type=ProductReview)
            result = Runner.run_sync(product_review_agent, test_case["review"])
            analysis = result.final_output
            print("📊 STRUCTURED ANALYSIS:")
            print(f"🏷️ Product: {analysis.product_info.name or 'Not specified'}")
            print(f"🏢 Brand: {analysis.product_info.brand or 'Not specified'}")
            print(f"📱 Category: {analysis.product_info.category.value.title()}")
            if analysis.product_info.price_mentioned:
                print(f"💰 Price: {analysis.product_info.price_mentioned}")
            print(f"\n⭐ Rating: {analysis.metrics.rating}/5 stars")
            print(f"😊 Sentiment: {analysis.metrics.sentiment.value.replace('_', ' ').title()}")
            print(f"🎯 Confidence: {analysis.metrics.confidence_score:.1%}")
            print(f"📝 Word Count: ~{analysis.metrics.word_count}")
            if analysis.main_positives:
                print(f"\n✅ Positives: {', '.join(analysis.main_positives)}")
            if analysis.main_negatives:
                print(f"❌ Negatives: {', '.join(analysis.main_negatives)}")
            if analysis.would_recommend is not None:
                recommend_text = "Yes" if analysis.would_recommend else "No"
                print(f"👍 Would Recommend: {recommend_text}")
            print(f"\n📋 Summary: {analysis.summary}")
            if analysis.key_phrases:
                print(f"🔑 Key Phrases: {', '.join(analysis.key_phrases)}")
            # Show aspects that were mentioned (None-valued aspects are skipped)
            aspects_mentioned = []
            if analysis.aspects.quality:
                aspects_mentioned.append(f"Quality: {analysis.aspects.quality}")
            if analysis.aspects.value_for_money:
                aspects_mentioned.append(f"Value: {analysis.aspects.value_for_money}")
            if analysis.aspects.shipping:
                aspects_mentioned.append(f"Shipping: {analysis.aspects.shipping}")
            if analysis.aspects.customer_service:
                aspects_mentioned.append(f"Service: {analysis.aspects.customer_service}")
            if analysis.aspects.ease_of_use:
                aspects_mentioned.append(f"Usability: {analysis.aspects.ease_of_use}")
            if aspects_mentioned:
                print(f"\n🔍 Specific Aspects: {' | '.join(aspects_mentioned)}")
        except Exception as e:
            # Demo-level boundary: report the failure and continue with the next case.
            print(f"❌ Error: {e}")
        print()
        print("-" * 60)
        print()
def interactive_mode():
    """Interactive loop: read a review from stdin, print its structured analysis.

    Loops until the user types 'quit', 'exit', or 'bye'; blank input is skipped.
    Errors from a single analysis are printed and the loop continues.
    """
    print("=== Interactive Product Review Analysis ===")
    print("Paste a product review and I'll extract structured data from it.")
    print("Type 'quit' to exit.")
    print()
    while True:
        review_text = input("Product Review: ").strip()
        if review_text.lower() in ['quit', 'exit', 'bye']:
            print("Goodbye!")
            break
        if not review_text:
            continue
        try:
            print("\nAnalyzing review...")
            # Blocking call; final_output is a ProductReview instance.
            result = Runner.run_sync(product_review_agent, review_text)
            analysis = result.final_output
            print("\n" + "="*50)
            print("📊 REVIEW ANALYSIS COMPLETE")
            print("="*50)
            # Product Information
            print("🏷️ PRODUCT INFO:")
            print(f"   Name: {analysis.product_info.name or 'Not specified'}")
            print(f"   Brand: {analysis.product_info.brand or 'Not specified'}")
            print(f"   Category: {analysis.product_info.category.value.title()}")
            if analysis.product_info.price_mentioned:
                print(f"   Price: {analysis.product_info.price_mentioned}")
            # Metrics
            print(f"\n📊 METRICS:")
            print(f"   Rating: {analysis.metrics.rating}/5 ⭐")
            print(f"   Sentiment: {analysis.metrics.sentiment.value.replace('_', ' ').title()}")
            print(f"   Confidence: {analysis.metrics.confidence_score:.1%}")
            # Key Points
            if analysis.main_positives:
                print(f"\n✅ POSITIVES: {', '.join(analysis.main_positives)}")
            if analysis.main_negatives:
                print(f"\n❌ NEGATIVES: {', '.join(analysis.main_negatives)}")
            # Summary
            print(f"\n📋 SUMMARY: {analysis.summary}")
            print("="*50)
            print()
        except Exception as e:
            # Keep the loop alive on any failure (parse error, API error, ...).
            print(f"❌ Error: {e}")
            print()
def main():
    """Entry point: verify credentials, run the demos, then go interactive."""
    # Check API key up front — every agent call would fail without it.
    if not os.getenv("OPENAI_API_KEY"):
        print("❌ Error: OPENAI_API_KEY not found in environment variables")
        print("Please create a .env file with your OpenAI API key")
        return
    try:
        # Run demonstrations
        demonstrate_review_analysis()
        # Interactive mode
        interactive_mode()
    except Exception as e:
        # Broad catch is acceptable here: top-level boundary of a demo script.
        print(f"❌ Error: {e}")
# Script entry point — demos first, then the interactive loop.
if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/product_review_agent.py",
"license": "Apache License 2.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/support_ticket_agent.py | """
OpenAI Agents SDK Tutorial 2: Structured Output Agent - Support Tickets
This module demonstrates how to create an agent that returns structured data
using Pydantic models for support ticket creation.
"""
import os
from typing import List, Optional
from enum import Enum
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from agents import Agent, Runner
# Load environment variables
load_dotenv()
class Priority(str, Enum):
    """Priority levels for support tickets, lowest to highest severity."""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
class Category(str, Enum):
    """Support ticket categories (routing departments); GENERAL is the fallback."""
    TECHNICAL = "technical"
    BILLING = "billing"
    ACCOUNT = "account"
    PRODUCT = "product"
    GENERAL = "general"
class SupportTicket(BaseModel):
    """Structured support ticket produced by the agent from a free-text complaint."""
    title: str = Field(description="A concise summary of the issue")
    description: str = Field(description="Detailed description of the problem")
    priority: Priority = Field(description="The ticket priority level")
    category: Category = Field(description="The department this ticket belongs to")
    customer_name: Optional[str] = Field(
        description="Customer name if mentioned",
        default=None
    )
    steps_to_reproduce: Optional[List[str]] = Field(
        description="Steps to reproduce the issue (for technical problems)",
        default=None
    )
    estimated_resolution_time: str = Field(
        description="Estimated time to resolve this issue"
    )
    urgency_keywords: List[str] = Field(
        description="Keywords that indicate urgency or importance",
        # default_factory instead of default=[]: each instance gets its own
        # list rather than all instances sharing one mutable default.
        default_factory=list
    )
# Create the support ticket agent
support_ticket_agent = Agent(
name="Support Ticket Creator",
instructions="""
You are a support ticket creation assistant that converts customer complaints
and issues into well-structured support tickets.
Based on customer descriptions, create structured support tickets with:
- Clear, concise titles
- Detailed problem descriptions
- Appropriate priority levels (low/medium/high/critical)
- Correct categories (technical/billing/account/product/general)
- Customer names if mentioned
- Steps to reproduce for technical issues
- Realistic resolution time estimates
- Keywords that indicate urgency
Priority Guidelines:
- CRITICAL: System down, security issues, data loss
- HIGH: Core features not working, urgent business impact
- MEDIUM: Important features affected, moderate business impact
- LOW: Minor issues, feature requests, general questions
Category Guidelines:
- TECHNICAL: App crashes, login issues, performance problems
- BILLING: Payment issues, subscription problems, invoice questions
- ACCOUNT: Profile issues, access problems, account settings
- PRODUCT: Feature requests, product feedback, functionality questions
- GENERAL: General inquiries, documentation, training
Resolution Time Guidelines:
- Critical: "1-4 hours"
- High: "4-24 hours"
- Medium: "1-3 business days"
- Low: "3-7 business days"
Always return a valid JSON object matching the SupportTicket schema.
""",
output_type=SupportTicket
)
def demonstrate_support_tickets():
    """Run the ticket agent over four canned complaints and print each ticket.

    Console demo only; per-case failures are printed and the loop continues.
    """
    print("🎯 OpenAI Agents SDK - Tutorial 2: Support Ticket Agent")
    print("=" * 60)
    print()
    # Test cases with different types of issues (billing / technical / account / low priority)
    test_cases = [
        {
            "description": "Billing Issue",
            "complaint": "Hi, I'm John Smith and I noticed my credit card was charged twice for last month's premium subscription. I only signed up once but see two $29.99 charges on my statement from January 15th. This needs to be resolved quickly as it's affecting my budget."
        },
        {
            "description": "Technical Issue",
            "complaint": "The mobile app keeps crashing whenever I try to upload photos. I'm using an iPhone 14 with iOS 17. Steps: 1) Open app 2) Go to gallery 3) Select photo 4) Tap upload 5) App crashes immediately. This is blocking my work completely!"
        },
        {
            "description": "Account Issue",
            "complaint": "I can't log into my account. My username is mary.johnson@email.com and I keep getting 'invalid credentials' even though I'm sure my password is correct. I've tried resetting it but never received the email. I need access urgently for a client meeting tomorrow."
        },
        {
            "description": "Low Priority Request",
            "complaint": "Hey there! I was wondering if you could add a dark mode feature to the app? It would be really nice to have, especially for us night owls. Not urgent at all, just a suggestion. Thanks!"
        }
    ]
    for i, test_case in enumerate(test_cases, 1):
        print(f"=== Test Case {i}: {test_case['description']} ===")
        print(f"Customer Complaint:")
        print(f'"{test_case["complaint"]}"')
        print()
        try:
            # Generate structured support ticket (final_output is a SupportTicket)
            result = Runner.run_sync(support_ticket_agent, test_case["complaint"])
            ticket = result.final_output
            print("Generated Support Ticket:")
            print(f"📋 Title: {ticket.title}")
            print(f"🏷️ Category: {ticket.category.value.title()}")
            print(f"⚡ Priority: {ticket.priority.value.title()}")
            if ticket.customer_name:
                print(f"👤 Customer: {ticket.customer_name}")
            print(f"📝 Description: {ticket.description}")
            if ticket.steps_to_reproduce:
                print(f"🔄 Steps to Reproduce:")
                for step in ticket.steps_to_reproduce:
                    print(f"   • {step}")
            print(f"⏱️ Estimated Resolution: {ticket.estimated_resolution_time}")
            if ticket.urgency_keywords:
                print(f"🚨 Urgency Keywords: {', '.join(ticket.urgency_keywords)}")
        except Exception as e:
            # Report and move on to the next test case.
            print(f"❌ Error: {e}")
        print()
        print("-" * 60)
        print()
def interactive_mode():
    """Interactive loop: read a complaint from stdin, print the generated ticket.

    Loops until the user types 'quit', 'exit', or 'bye'; blank input is skipped.
    """
    print("=== Interactive Support Ticket Creation ===")
    print("Describe a customer issue and I'll create a structured support ticket.")
    print("Type 'quit' to exit.")
    print()
    while True:
        complaint = input("Customer Complaint: ").strip()
        if complaint.lower() in ['quit', 'exit', 'bye']:
            print("Goodbye!")
            break
        if not complaint:
            continue
        try:
            print("\nGenerating support ticket...")
            # Blocking call; final_output is a SupportTicket instance.
            result = Runner.run_sync(support_ticket_agent, complaint)
            ticket = result.final_output
            print("\n" + "="*50)
            print("📋 SUPPORT TICKET CREATED")
            print("="*50)
            print(f"Title: {ticket.title}")
            print(f"Category: {ticket.category.value.title()}")
            print(f"Priority: {ticket.priority.value.title()}")
            if ticket.customer_name:
                print(f"Customer: {ticket.customer_name}")
            print(f"Description: {ticket.description}")
            if ticket.steps_to_reproduce:
                print("Steps to Reproduce:")
                for i, step in enumerate(ticket.steps_to_reproduce, 1):
                    print(f"  {i}. {step}")
            print(f"Estimated Resolution: {ticket.estimated_resolution_time}")
            if ticket.urgency_keywords:
                print(f"Urgency Keywords: {', '.join(ticket.urgency_keywords)}")
            print("="*50)
            print()
        except Exception as e:
            # Keep the loop alive on any failure.
            print(f"❌ Error: {e}")
            print()
def main():
    """Entry point: verify credentials, run the demos, then go interactive."""
    # Check API key up front — every agent call would fail without it.
    if not os.getenv("OPENAI_API_KEY"):
        print("❌ Error: OPENAI_API_KEY not found in environment variables")
        print("Please create a .env file with your OpenAI API key")
        return
    try:
        # Run demonstrations
        demonstrate_support_tickets()
        # Interactive mode
        interactive_mode()
    except Exception as e:
        # Broad catch is acceptable here: top-level boundary of a demo script.
        print(f"❌ Error: {e}")
# Script entry point — demos first, then the interactive loop.
if __name__ == "__main__":
    main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/support_ticket_agent.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_1_function_tools/agent.py | from agents import Agent
from .tools import add_numbers, multiply_numbers, get_weather, convert_temperature
# Create an agent with custom function tools
root_agent = Agent(
name="Function Tools Agent",
instructions="""
You are a helpful assistant with access to various tools.
Available tools:
- add_numbers: Add two numbers together
- multiply_numbers: Multiply two numbers together
- get_weather: Get weather information for a city
- convert_temperature: Convert between Celsius and Fahrenheit
When users ask for calculations or information:
1. Use the appropriate tool for the task
2. Explain what you're doing
3. Show the result clearly
Always use the provided tools rather than doing calculations yourself.
""",
tools=[add_numbers, multiply_numbers, get_weather, convert_temperature]
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_1_function_tools/agent.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_2_builtin_tools/agent.py | from agents import Agent
from agents.tools import WebSearchTool, CodeInterpreterTool
# Create an agent with built-in OpenAI tools
root_agent = Agent(
name="Built-in Tools Agent",
instructions="""
You are a research and computation assistant with access to powerful built-in tools.
Available tools:
- WebSearchTool: Search the web for current information
- CodeInterpreterTool: Execute Python code safely
You can help with:
- Finding current information and news
- Performing complex calculations
- Data analysis and visualization
- Mathematical computations
When users request information or calculations:
1. Use web search for current information
2. Use code execution for computations and analysis
3. Provide clear explanations of results
""",
tools=[WebSearchTool(), CodeInterpreterTool()]
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_2_builtin_tools/agent.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/advanced_agent.py | from agents import Agent, Runner, function_tool
# Define a specialized research agent
research_agent = Agent(
name="Research Specialist",
instructions="""
You are a research specialist. Provide detailed, well-researched information
on any topic with proper analysis and insights.
"""
)
# Define a writing agent
writing_agent = Agent(
name="Writing Specialist",
instructions="""
You are a professional writer. Take research information and create
well-structured, engaging content with proper formatting.
"""
)
@function_tool
async def run_research_agent(topic: str) -> str:
    """Delegate a research request to the research specialist agent.

    Wraps Runner.run with a capped turn budget and returns the specialist's
    final output as plain text.
    """
    prompt = f"Research this topic thoroughly: {topic}"
    # Cap the inner agent loop so one tool call cannot run away.
    outcome = await Runner.run(research_agent, input=prompt, max_turns=3)
    return str(outcome.final_output)
@function_tool
async def run_writing_agent(content: str, style: str = "professional") -> str:
    """Rewrite *content* in the requested style via the writing specialist agent."""
    result = await Runner.run(
        writing_agent,
        input=f"Rewrite this content in a {style} style: {content}",
        max_turns=2,  # short budget: a rewrite needs no long tool loop
    )
    return str(result.final_output)
# Create orchestrator with custom agent tools
advanced_orchestrator = Agent(
name="Content Creation Orchestrator",
instructions="""
You are a content creation orchestrator that combines research and writing expertise.
You have access to:
- Research agent: For in-depth topic research
- Writing agent: For professional content creation
When users request content:
1. First use the research agent to gather information
2. Then use the writing agent to create polished content
3. You can specify writing styles (professional, casual, academic, etc.)
Coordinate both agents to create comprehensive, well-written content.
""",
tools=[run_research_agent, run_writing_agent]
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/advanced_agent.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/agent.py | from agents import Agent
# Define specialized translation agents — one per target language; each is
# wrapped as a tool of the orchestrator below via Agent.as_tool().
spanish_agent = Agent(
    name="Spanish Agent",
    instructions="You translate the user's message to Spanish"
)
french_agent = Agent(
    name="French Agent",
    instructions="You translate the user's message to French"
)
german_agent = Agent(
    name="German Agent",
    instructions="You translate the user's message to German"
)
# Create orchestrator agent that uses other agents as tools
root_agent = Agent(
name="Translation Orchestrator",
instructions="""
You are a translation orchestrator agent. You coordinate specialized translation agents.
You have access to translation agents for:
- Spanish translations
- French translations
- German translations
When users request translations:
1. Use the appropriate translation agent tool
2. You can use multiple agents if asked for multiple translations
3. Present the results clearly with language labels
If asked for multiple translations, call the relevant tools for each language.
""",
tools=[
spanish_agent.as_tool(
tool_name="translate_to_spanish",
tool_description="Translate the user's message to Spanish"
),
french_agent.as_tool(
tool_name="translate_to_french",
tool_description="Translate the user's message to French"
),
german_agent.as_tool(
tool_name="translate_to_german",
tool_description="Translate the user's message to German"
)
]
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/agent.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_1_execution_methods/agent.py | from agents import Agent, Runner
import asyncio
# Create a simple agent for demonstrating execution methods
root_agent = Agent(
name="Execution Demo Agent",
instructions="""
You are a helpful assistant demonstrating different execution patterns.
Provide clear, informative responses that help users understand:
- Synchronous execution (blocking)
- Asynchronous execution (non-blocking)
- Streaming execution (real-time)
Keep responses appropriate for the execution method being demonstrated.
"""
)
# Example 1: Synchronous execution
def sync_execution_example():
    """Demonstrates Runner.run_sync() - blocking execution.

    run_sync blocks the calling thread until the agent run finishes.
    """
    return Runner.run_sync(
        root_agent, "Explain synchronous execution in simple terms"
    ).final_output
# Example 2: Asynchronous execution
async def async_execution_example():
    """Demonstrates Runner.run() - non-blocking execution.

    Awaiting the run lets other tasks on the event loop progress meanwhile.
    """
    outcome = await Runner.run(root_agent, "Explain asynchronous execution benefits")
    return outcome.final_output
# Example 3: Streaming execution
async def streaming_execution_example():
    """Demonstrates Runner.run_streamed() - real-time streaming.

    Echoes text chunks to stdout as they arrive and returns the full text.
    """
    chunks = []
    async for event in Runner.run_streamed(root_agent, "Write a detailed explanation of streaming execution"):
        # Only text-bearing events contribute to the transcript.
        content = getattr(event, 'content', None)
        if content:
            chunks.append(content)
            print(content, end='', flush=True)  # Print in real-time
    print()  # New line after streaming
    return "".join(chunks)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_1_execution_methods/agent.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_2_conversation_management/agent.py | from agents import Agent, Runner, SQLiteSession
# Create an agent for demonstrating conversation management; the instructions
# ask it to surface remembered context so memory effects are visible.
root_agent = Agent(
    name="Conversation Agent",
    instructions="You are a helpful assistant that remembers conversation context. Reply concisely but reference previous context when relevant."
)
# Example 1: Manual conversation management
async def manual_conversation_example():
    """Carry context across turns by hand using result.to_input_list()."""
    # Turn 1: give the agent some facts to remember.
    first = await Runner.run(root_agent, "My name is Alice and I live in San Francisco.")
    print(f"Turn 1: {first.final_output}")
    # Turn 2: replay the prior transcript plus the new user message.
    followup_input = first.to_input_list() + [{"role": "user", "content": "What city do I live in?"}]
    second = await Runner.run(root_agent, followup_input)
    print(f"Turn 2: {second.final_output}")
    return second
# Example 2: Automatic conversation management with Sessions
async def session_conversation_example():
    """Let SQLiteSession persist and replay conversation history automatically."""
    # One session id corresponds to one persistent conversation thread.
    session = SQLiteSession("conversation_123")
    first = await Runner.run(root_agent, "I'm a software developer working on AI projects.", session=session)
    print(f"Session Turn 1: {first.final_output}")
    # No manual history plumbing: the session supplies prior turns itself.
    second = await Runner.run(root_agent, "What kind of work do I do?", session=session)
    print(f"Session Turn 2: {second.final_output}")
    return second
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_2_conversation_management/agent.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_3_run_configuration/agent.py | from agents import Agent, Runner, RunConfig
# Create a minimal agent whose per-run behavior is overridden via RunConfig
# in the two examples below.
root_agent = Agent(
    name="Configuration Demo Agent",
    instructions="You are a helpful assistant that demonstrates run configuration options."
)
# Example 1: Basic run configuration with model settings
async def model_config_example():
    """Run the agent once with per-call model, sampling, and tracing overrides."""
    # Low temperature keeps demo output stable between runs.
    sampling = {"temperature": 0.1, "top_p": 0.9}
    cfg = RunConfig(
        model="gpt-4o",                 # overrides the agent's default model
        model_settings=sampling,
        max_turns=5,                    # hard stop on the agent loop
        workflow_name="demo_workflow",  # label used in tracing
        trace_metadata={"experiment": "config_demo"},
    )
    outcome = await Runner.run(
        root_agent,
        "Explain the weather in exactly 3 sentences.",
        run_config=cfg,
    )
    return outcome.final_output
# Example 2: Run configuration with tracing settings
async def tracing_config_example():
    """Run the agent with explicit tracing, grouping, and metadata options."""
    metadata = {"user_id": "user_123", "feature": "chat_assistance"}
    cfg = RunConfig(
        tracing_disabled=False,               # keep tracing on
        trace_include_sensitive_data=False,   # redact payloads from traces
        workflow_name="production_workflow",
        group_id="user_session_456",          # ties related runs together
        trace_metadata=metadata,
    )
    outcome = await Runner.run(
        root_agent,
        "What are the benefits of structured logging?",
        run_config=cfg,
    )
    return outcome.final_output
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_3_run_configuration/agent.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_4_streaming_events/agent.py | from agents import Agent, Runner
import asyncio
import time
# Create agents for demonstrating streaming events
root_agent = Agent(
name="Streaming Demo Agent",
instructions="""
You are a helpful assistant that demonstrates streaming capabilities.
When asked to write long content, be comprehensive and detailed.
When asked technical questions, provide thorough explanations.
"""
)
# Example 1: Basic streaming with event processing
async def basic_streaming_example():
    """Stream one long response, echoing chunks live and timing the run.

    Returns the full accumulated response text.
    """
    print("=== Basic Streaming Events ===")
    print("Requesting a detailed explanation...")
    full_response = ""
    start_time = time.time()
    # Use run_streamed to get real-time events
    async for event in Runner.run_streamed(
        root_agent,
        "Write a comprehensive explanation of how machine learning works, including examples."
    ):
        # Process different types of streaming events
        if hasattr(event, 'content') and event.content:
            # This is a text content event — accumulate and echo immediately.
            full_response += event.content
            print(event.content, end='', flush=True)
        if hasattr(event, 'type'):
            # Handle lifecycle event types
            # NOTE(review): event attribute/type names are duck-typed here —
            # confirm against the SDK's streaming event model.
            if event.type == "response_start":
                print(f"\n[EVENT] Response started")
            elif event.type == "response_complete":
                print(f"\n[EVENT] Response completed")
    elapsed_time = time.time() - start_time
    print(f"\n\nStreaming completed in {elapsed_time:.2f} seconds")
    print(f"Total response length: {len(full_response)} characters")
    return full_response
# Example 2: Advanced streaming with RunResultStreaming
async def advanced_streaming_example():
    """Stream a long generation while tracking event/chunk counts.

    Returns the joined story text; prints a progress line every 10 chunks.
    """
    print("\n=== Advanced Streaming with RunResultStreaming ===")
    print("Generating a long story with progress tracking...")
    # Track streaming progress
    events_count = 0
    chunks_received = []
    # Get the streaming result generator (iterated below, not awaited)
    streaming_result = Runner.run_streamed(
        root_agent,
        "Write a creative short story about a robot who discovers emotions. Make it at least 500 words."
    )
    print("Processing streaming events:")
    async for event in streaming_result:
        events_count += 1
        # Collect content chunks (text-bearing events only)
        if hasattr(event, 'content') and event.content:
            chunks_received.append(event.content)
            # Show progress every 10 chunks
            if len(chunks_received) % 10 == 0:
                print(f"\n[PROGRESS] Received {len(chunks_received)} chunks...")
            print(event.content, end='', flush=True)
        # Handle specific event types
        # NOTE(review): type strings are assumed from the SDK's event model — verify.
        if hasattr(event, 'type'):
            if event.type == "tool_call_start":
                print(f"\n[EVENT] Tool call started")
            elif event.type == "tool_call_complete":
                print(f"\n[EVENT] Tool call completed")
    print(f"\n\nStreaming summary:")
    print(f"- Total events processed: {events_count}")
    print(f"- Content chunks received: {len(chunks_received)}")
    print(f"- Final story length: {sum(len(chunk) for chunk in chunks_received)} characters")
    # Access the final result
    final_result = "".join(chunks_received)
    return final_result
# Example 3: Streaming with custom processing
async def custom_streaming_processing():
    """Demonstrates custom streaming event processing.

    Collects simple throughput analytics (chunk sizes, words/second) while
    streaming a response, then prints a summary.

    Returns:
        dict: Analytics with keys 'words_per_second', 'chunk_sizes',
        'response_time', and 'total_words'.
    """
    print("\n=== Custom Streaming Processing ===")
    print("Analyzing streaming patterns...")
    # Custom streaming analytics accumulator
    analytics = {
        "words_per_second": [],
        "chunk_sizes": [],
        "response_time": None,
        "total_words": 0
    }
    start_time = time.time()
    last_update = start_time
    current_content = ""
    # run_streamed() returns RunResultStreaming; iterate stream_events()
    # for the per-chunk events.
    result = Runner.run_streamed(
        root_agent,
        "Explain the benefits and challenges of renewable energy in detail."
    )
    async for event in result.stream_events():
        current_time = time.time()
        if hasattr(event, 'content') and event.content:
            # Track chunk size
            chunk_size = len(event.content)
            analytics["chunk_sizes"].append(chunk_size)
            # Accumulate content for the final word count
            current_content += event.content
            # Sample words-per-second every 5 chunks to smooth out noise
            if len(analytics["chunk_sizes"]) % 5 == 0:
                time_diff = current_time - last_update
                if time_diff > 0:
                    words_in_chunk = len(event.content.split())
                    wps = words_in_chunk / time_diff
                    analytics["words_per_second"].append(wps)
                last_update = current_time
            print(event.content, end='', flush=True)
    # Final analytics
    analytics["response_time"] = time.time() - start_time
    analytics["total_words"] = len(current_content.split())
    print(f"\n\nStreaming Analytics:")
    print(f"- Total response time: {analytics['response_time']:.2f} seconds")
    print(f"- Total words: {analytics['total_words']}")
    # Guard against division by zero when no content events arrived
    if analytics["chunk_sizes"]:
        print(f"- Average chunk size: {sum(analytics['chunk_sizes'])/len(analytics['chunk_sizes']):.1f} chars")
    if analytics["words_per_second"]:
        avg_wps = sum(analytics["words_per_second"]) / len(analytics["words_per_second"])
        print(f"- Average words per second: {avg_wps:.1f}")
    return analytics
# Example 4: Streaming with error handling
async def streaming_with_error_handling():
    """Shows proper error handling for streaming operations.

    Per-chunk errors are logged and skipped; a failure of the stream itself
    is caught by the outer handler.

    Returns:
        str | None: The assembled response text, or None when streaming failed.
    """
    print("\n=== Streaming with Error Handling ===")
    try:
        response_parts = []
        # Iterate the stream_events() generator of the RunResultStreaming
        # object returned by run_streamed().
        stream = Runner.run_streamed(
            root_agent,
            "What are the top 3 programming languages and why?"
        )
        async for event in stream.stream_events():
            try:
                if hasattr(event, 'content') and event.content:
                    response_parts.append(event.content)
                    print(event.content, end='', flush=True)
            except Exception as chunk_error:
                # A bad chunk should not abort the whole stream
                print(f"\n[ERROR] Error processing chunk: {chunk_error}")
                continue  # Continue with next chunk
        print(f"\n\nStreaming completed successfully!")
        print(f"Collected {len(response_parts)} response parts")
        return "".join(response_parts)
    except Exception as streaming_error:
        print(f"\n[ERROR] Streaming failed: {streaming_error}")
        return None
# Main execution
async def main():
    """Run every streaming demonstration in sequence."""
    print("🚀 OpenAI Agents SDK - Streaming Events")
    print("=" * 60)
    demos = (
        basic_streaming_example,
        advanced_streaming_example,
        custom_streaming_processing,
        streaming_with_error_handling,
    )
    for demo in demos:
        await demo()
    print("\n✅ Streaming events tutorial complete!")
    print("Streaming enables real-time response processing for better user experience")


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_4_streaming_events/agent.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/agent_runner.py | import streamlit as st
import asyncio
import time
import json
from datetime import datetime
from agents import Agent, Runner, RunConfig, SQLiteSession
from agents.exceptions import (
AgentsException,
MaxTurnsExceeded,
ModelBehaviorError,
UserError
)
from dotenv import load_dotenv
# Load environment variables (e.g. the OpenAI API key) from a local .env file
load_dotenv()
# Page configuration — must be the first Streamlit call in the script
st.set_page_config(
    page_title="Agent Runner Demo",
    page_icon="🚀",
    layout="wide",
    initial_sidebar_state="expanded"
)
# Initialize agents
@st.cache_resource
def initialize_agents():
    """Initialize agents for different demonstrations.

    Decorated with st.cache_resource so the four Agent objects are built once
    per Streamlit server process and reused across script reruns.

    Returns:
        tuple: (execution_agent, conversation_agent, config_agent,
        streaming_agent), in that order.
    """
    # Demonstrates sync / async / streaming execution patterns
    execution_agent = Agent(
        name="Execution Demo Agent",
        instructions="""
        You are a helpful assistant demonstrating different execution patterns.
        Provide clear, informative responses that help users understand:
        - Synchronous execution (blocking)
        - Asynchronous execution (non-blocking)
        - Streaming execution (real-time)
        Keep responses appropriate for the execution method being demonstrated.
        """
    )
    # Used by the conversation-memory demos (manual threading + sessions)
    conversation_agent = Agent(
        name="Conversation Agent",
        instructions="You are a helpful assistant that remembers conversation context. Reply concisely but reference previous context when relevant."
    )
    # Used by the RunConfig demos
    config_agent = Agent(
        name="Configuration Demo Agent",
        instructions="You are a helpful assistant that demonstrates run configuration options. Be precise and informative."
    )
    # Used by the streaming-events demos
    streaming_agent = Agent(
        name="Streaming Demo Agent",
        instructions="""
        You are a helpful assistant that demonstrates streaming capabilities.
        When asked to write long content, be comprehensive and detailed.
        When asked technical questions, provide thorough explanations.
        """
    )
    return execution_agent, conversation_agent, config_agent, streaming_agent
# Session management
class StreamingCapture:
    """Mutable holder for streamed events, accumulated text, and timing.

    Attributes:
        events: List of raw streaming events captured so far.
        content: Concatenated text content received so far.
        start_time: Timestamp when capture began, or None.
        end_time: Timestamp when capture finished, or None.
    """

    def __init__(self):
        # Delegate to reset() so construction and re-initialization share
        # a single code path (the original duplicated the assignments).
        self.reset()

    def reset(self):
        """Clear all captured state back to its initial empty values."""
        self.events = []
        self.content = ""
        self.start_time = None
        self.end_time = None
# Initialize session state.
# Streamlit reruns this script on every interaction; st.session_state keeps
# these objects alive across reruns.
if 'session_manager' not in st.session_state:
    st.session_state.session_manager = {}  # maps session_id -> SQLiteSession
if 'streaming_capture' not in st.session_state:
    st.session_state.streaming_capture = StreamingCapture()
# Main UI
def main():
    """Top-level Streamlit page: sidebar settings plus the selected demo panel."""
    st.title("🚀 Agent Runner Demo")
    st.markdown("**Demonstrates OpenAI Agents SDK execution capabilities**")
    # Initialize agents (cached across reruns by st.cache_resource)
    execution_agent, conversation_agent, config_agent, streaming_agent = initialize_agents()
    # Sidebar for configuration
    with st.sidebar:
        st.header("⚙️ Execution Configuration")
        demo_type = st.selectbox(
            "Select Demo Type",
            ["Execution Methods", "Conversation Management", "Run Configuration", "Streaming Events", "Exception Handling"]
        )
        st.divider()
        # Global settings
        st.subheader("Global Settings")
        # Model configuration
        model_choice = st.selectbox(
            "Model",
            ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"],
            index=0
        )
        temperature = st.slider(
            "Temperature",
            min_value=0.0,
            max_value=2.0,
            value=0.7,
            step=0.1
        )
        max_turns = st.number_input(
            "Max Turns",
            min_value=1,
            max_value=20,
            value=10
        )
    # Main content area — dispatch to the renderer for the chosen demo.
    # NOTE(review): model_choice/temperature/max_turns are forwarded to every
    # renderer, but not all renderers apply them — confirm this is intended.
    if demo_type == "Execution Methods":
        render_execution_methods(execution_agent, model_choice, temperature, max_turns)
    elif demo_type == "Conversation Management":
        render_conversation_management(conversation_agent, model_choice, temperature, max_turns)
    elif demo_type == "Run Configuration":
        render_run_configuration(config_agent, model_choice, temperature, max_turns)
    elif demo_type == "Streaming Events":
        render_streaming_events(streaming_agent, model_choice, temperature, max_turns)
    elif demo_type == "Exception Handling":
        render_exception_handling(execution_agent, model_choice, temperature, max_turns)
def render_execution_methods(agent, model_choice, temperature, max_turns):
    """Render the execution methods demo.

    Shows three side-by-side columns comparing Runner.run_sync (blocking),
    Runner.run (awaitable), and Runner.run_streamed (live-updating) calls.

    Args:
        agent: The Agent instance to execute.
        model_choice: Selected model id (currently unused by this panel).
        temperature: Selected temperature (currently unused by this panel).
        max_turns: Selected max turns (currently unused by this panel).
    """
    st.header("⚡ Execution Methods Demo")
    st.markdown("Compare synchronous, asynchronous, and streaming execution patterns.")
    col1, col2, col3 = st.columns(3)
    with col1:
        st.subheader("🔄 Synchronous (Blocking)")
        st.caption("Runner.run_sync() - Blocks until complete")
        with st.form("sync_form"):
            sync_input = st.text_area("Your message:", key="sync_input", value="Explain synchronous execution in simple terms")
            sync_submitted = st.form_submit_button("Run Sync")
        if sync_submitted and sync_input:
            with st.spinner("Processing synchronously..."):
                start_time = time.time()
                try:
                    result = Runner.run_sync(agent, sync_input)
                    execution_time = time.time() - start_time
                    st.success(f"✅ Completed in {execution_time:.2f}s")
                    st.write("**Response:**")
                    st.write(result.final_output)
                except Exception as e:
                    st.error(f"❌ Error: {e}")
    with col2:
        st.subheader("⚡ Asynchronous (Non-blocking)")
        st.caption("Runner.run() - Returns awaitable")
        with st.form("async_form"):
            async_input = st.text_area("Your message:", key="async_input", value="Explain asynchronous execution benefits")
            async_submitted = st.form_submit_button("Run Async")
        if async_submitted and async_input:
            with st.spinner("Processing asynchronously..."):
                start_time = time.time()
                try:
                    # asyncio.run drives the awaitable from Streamlit's sync context
                    result = asyncio.run(Runner.run(agent, async_input))
                    execution_time = time.time() - start_time
                    st.success(f"✅ Completed in {execution_time:.2f}s")
                    st.write("**Response:**")
                    st.write(result.final_output)
                except Exception as e:
                    st.error(f"❌ Error: {e}")
    with col3:
        st.subheader("🌊 Streaming (Real-time)")
        st.caption("Runner.run_streamed() - Live updates")
        with st.form("streaming_form"):
            streaming_input = st.text_area("Your message:", key="streaming_input", value="Write a detailed explanation of streaming execution")
            streaming_submitted = st.form_submit_button("Run Streaming")
        if streaming_submitted and streaming_input:
            st.info("🔄 Streaming response...")
            # Create containers for streaming output
            response_container = st.empty()
            progress_container = st.empty()
            try:
                full_response = ""
                start_time = time.time()
                async def stream_response():
                    nonlocal full_response
                    # run_streamed() returns RunResultStreaming; the events
                    # come from its stream_events() async generator.
                    stream = Runner.run_streamed(agent, streaming_input)
                    async for event in stream.stream_events():
                        if hasattr(event, 'content') and event.content:
                            full_response += event.content
                            response_container.write(f"**Response:**\n{full_response}")
                    execution_time = time.time() - start_time
                    progress_container.success(f"✅ Streaming completed in {execution_time:.2f}s")
                asyncio.run(stream_response())
            except Exception as e:
                st.error(f"❌ Streaming error: {e}")
def render_conversation_management(agent, model_choice, temperature, max_turns):
    """Render the conversation management demo.

    Two tabs contrast manual history threading (result.to_input_list())
    against automatic memory via SQLiteSession.
    """
    st.header("💬 Conversation Management Demo")
    st.markdown("Compare manual conversation threading vs automatic session management.")
    tab1, tab2 = st.tabs(["Manual Threading", "Session Management"])
    with tab1:
        st.subheader("🔧 Manual Conversation Threading")
        st.caption("Using result.to_input_list() for conversation history")
        # Initialize conversation history in session state
        if 'manual_conversation' not in st.session_state:
            st.session_state.manual_conversation = []
        with st.form("manual_form"):
            manual_input = st.text_input("Your message:")
            manual_submitted = st.form_submit_button("Send Message")
        if manual_submitted and manual_input:
            with st.spinner("Processing..."):
                try:
                    # Build input list manually: prior turns + the new user message
                    input_list = st.session_state.manual_conversation.copy()
                    input_list.append({"role": "user", "content": manual_input})
                    result = asyncio.run(Runner.run(agent, input_list))
                    # Update conversation history from the run result
                    st.session_state.manual_conversation = result.to_input_list()
                    st.success("Message sent!")
                    st.write(f"**Assistant:** {result.final_output}")
                except Exception as e:
                    st.error(f"❌ Error: {e}")
        # Show conversation history
        if st.button("📋 Show Manual History"):
            if st.session_state.manual_conversation:
                st.write("**Conversation History:**")
                for i, item in enumerate(st.session_state.manual_conversation, 1):
                    role_emoji = "👤" if item['role'] == 'user' else "🤖"
                    st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
            else:
                st.info("No conversation history yet.")
        if st.button("🗑️ Clear Manual History"):
            st.session_state.manual_conversation = []
            st.success("Manual conversation history cleared!")
    with tab2:
        st.subheader("🔄 Automatic Session Management")
        st.caption("Using SQLiteSession for automatic conversation memory")
        # Fixed id: every visitor shares this demo conversation
        session_id = "demo_conversation"
        with st.form("session_form"):
            session_input = st.text_input("Your message:")
            session_submitted = st.form_submit_button("Send Message")
        if session_submitted and session_input:
            with st.spinner("Processing..."):
                try:
                    # Get or create the session, cached in st.session_state
                    if session_id not in st.session_state.session_manager:
                        st.session_state.session_manager[session_id] = SQLiteSession(session_id)
                    session = st.session_state.session_manager[session_id]
                    result = asyncio.run(Runner.run(agent, session_input, session=session))
                    st.success("Message sent!")
                    st.write(f"**Assistant:** {result.final_output}")
                except Exception as e:
                    st.error(f"❌ Error: {e}")
        # Show session history
        if st.button("📋 Show Session History"):
            if session_id in st.session_state.session_manager:
                session = st.session_state.session_manager[session_id]
                try:
                    items = asyncio.run(session.get_items())
                    if items:
                        st.write("**Session History:**")
                        for i, item in enumerate(items, 1):
                            role_emoji = "👤" if item['role'] == 'user' else "🤖"
                            st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
                    else:
                        st.info("No session history yet.")
                except Exception as e:
                    st.error(f"❌ Error retrieving history: {e}")
            else:
                st.info("No session created yet.")
        if st.button("🗑️ Clear Session History"):
            if session_id in st.session_state.session_manager:
                try:
                    session = st.session_state.session_manager[session_id]
                    asyncio.run(session.clear_session())
                    del st.session_state.session_manager[session_id]
                    st.success("Session history cleared!")
                except Exception as e:
                    st.error(f"❌ Error clearing session: {e}")
def render_run_configuration(agent, model_choice, temperature, max_turns):
    """Render the run configuration demo.

    Left column: model settings via RunConfig. Right column: tracing
    configuration (workflow name, group id, metadata).
    """
    st.header("⚙️ Run Configuration Demo")
    st.markdown("Demonstrates advanced run configuration options with RunConfig.")
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("🎛️ Basic Configuration")
        with st.form("basic_config_form"):
            st.write("**Model Settings:**")
            config_temperature = st.slider("Temperature", 0.0, 2.0, 0.1, 0.1, key="config_temp")
            config_top_p = st.slider("Top P", 0.0, 1.0, 0.9, 0.1, key="config_top_p")
            config_max_turns = st.number_input("Max Turns", 1, 20, 5, key="config_turns")
            config_input = st.text_area("Your message:", value="Explain the weather in exactly 3 sentences.")
            config_submitted = st.form_submit_button("Run with Config")
        if config_submitted and config_input:
            with st.spinner("Processing with configuration..."):
                try:
                    # NOTE(review): verify against the installed SDK — RunConfig
                    # typically expects a ModelSettings object (not a plain dict)
                    # for model_settings, and max_turns is usually a Runner.run
                    # parameter rather than a RunConfig field.
                    run_config = RunConfig(
                        model=model_choice,
                        model_settings={
                            "temperature": config_temperature,
                            "top_p": config_top_p
                        },
                        max_turns=config_max_turns,
                        workflow_name="basic_config_demo"
                    )
                    start_time = time.time()
                    result = asyncio.run(Runner.run(agent, config_input, run_config=run_config))
                    execution_time = time.time() - start_time
                    st.success(f"✅ Completed in {execution_time:.2f}s")
                    st.write("**Response:**")
                    st.write(result.final_output)
                    # Show configuration used
                    st.json({
                        "model": model_choice,
                        "temperature": config_temperature,
                        "top_p": config_top_p,
                        "max_turns": config_max_turns
                    })
                except Exception as e:
                    st.error(f"❌ Error: {e}")
    with col2:
        st.subheader("📊 Tracing Configuration")
        with st.form("tracing_config_form"):
            st.write("**Tracing Settings:**")
            workflow_name = st.text_input("Workflow Name", value="production_workflow")
            group_id = st.text_input("Group ID", value="user_session_456")
            user_id = st.text_input("User ID", value="user_123")
            feature_name = st.text_input("Feature", value="chat_assistance")
            tracing_input = st.text_area("Your message:", value="What are the benefits of structured logging?")
            tracing_submitted = st.form_submit_button("Run with Tracing")
        if tracing_submitted and tracing_input:
            with st.spinner("Processing with tracing..."):
                try:
                    # Tracing enabled, but sensitive data excluded from traces
                    run_config = RunConfig(
                        model=model_choice,
                        tracing_disabled=False,
                        trace_include_sensitive_data=False,
                        workflow_name=workflow_name,
                        group_id=group_id,
                        trace_metadata={
                            "user_id": user_id,
                            "feature": feature_name,
                            "timestamp": datetime.now().isoformat()
                        }
                    )
                    start_time = time.time()
                    result = asyncio.run(Runner.run(agent, tracing_input, run_config=run_config))
                    execution_time = time.time() - start_time
                    st.success(f"✅ Completed with tracing in {execution_time:.2f}s")
                    st.write("**Response:**")
                    st.write(result.final_output)
                    # Show tracing configuration
                    st.write("**Tracing Configuration:**")
                    st.json({
                        "workflow_name": workflow_name,
                        "group_id": group_id,
                        "metadata": {
                            "user_id": user_id,
                            "feature": feature_name
                        }
                    })
                except Exception as e:
                    st.error(f"❌ Error: {e}")
def render_streaming_events(agent, model_choice, temperature, max_turns):
    """Render the streaming events demo.

    Tab 1: basic streaming with a live progress metric.
    Tab 2: streaming with per-chunk analytics (counts, sizes, throughput).
    """
    st.header("🌊 Streaming Events Demo")
    st.markdown("Demonstrates advanced streaming event processing and real-time analytics.")
    tab1, tab2 = st.tabs(["Basic Streaming", "Advanced Analytics"])
    with tab1:
        st.subheader("🎯 Basic Streaming with Event Processing")
        with st.form("streaming_basic_form"):
            streaming_input = st.text_area(
                "Your message:",
                value="Write a comprehensive explanation of how machine learning works, including examples."
            )
            streaming_submitted = st.form_submit_button("Start Streaming")
        if streaming_submitted and streaming_input:
            st.info("🔄 Streaming in progress...")
            # Create containers that are overwritten as chunks arrive
            response_container = st.empty()
            stats_container = st.empty()
            try:
                full_response = ""
                events_count = 0
                start_time = time.time()
                async def process_streaming():
                    nonlocal full_response, events_count
                    # run_streamed() returns RunResultStreaming; events are
                    # consumed via its stream_events() async generator.
                    stream = Runner.run_streamed(agent, streaming_input)
                    async for event in stream.stream_events():
                        events_count += 1
                        if hasattr(event, 'content') and event.content:
                            full_response += event.content
                            # Update display
                            response_container.write(f"**Response:**\n{full_response}")
                            # Update stats
                            elapsed = time.time() - start_time
                            char_count = len(full_response)
                            word_count = len(full_response.split())
                            stats_container.metric(
                                label="Streaming Progress",
                                value=f"{char_count} chars, {word_count} words",
                                delta=f"{elapsed:.1f}s elapsed"
                            )
                asyncio.run(process_streaming())
                final_time = time.time() - start_time
                st.success(f"✅ Streaming completed! {events_count} events in {final_time:.2f}s")
            except Exception as e:
                st.error(f"❌ Streaming error: {e}")
    with tab2:
        st.subheader("📈 Advanced Streaming Analytics")
        with st.form("streaming_analytics_form"):
            analytics_input = st.text_area(
                "Your message:",
                value="Explain the benefits and challenges of renewable energy in detail."
            )
            analytics_submitted = st.form_submit_button("Stream with Analytics")
        if analytics_submitted and analytics_input:
            st.info("🔄 Streaming with analytics...")
            # Create analytics containers
            response_container = st.empty()
            metrics_col1, metrics_col2, metrics_col3 = st.columns(3)
            try:
                analytics = {
                    "chunks": [],
                    "chunk_sizes": [],
                    "timestamps": [],
                    "content": ""
                }
                start_time = time.time()
                async def process_analytics_streaming():
                    # Same pattern: iterate the result's stream_events()
                    stream = Runner.run_streamed(agent, analytics_input)
                    async for event in stream.stream_events():
                        current_time = time.time()
                        if hasattr(event, 'content') and event.content:
                            # Collect analytics per content chunk
                            analytics["chunks"].append(event.content)
                            analytics["chunk_sizes"].append(len(event.content))
                            analytics["timestamps"].append(current_time - start_time)
                            analytics["content"] += event.content
                            # Update display
                            response_container.write(f"**Response:**\n{analytics['content']}")
                            # Update metrics (chunk_sizes is non-empty here)
                            with metrics_col1:
                                st.metric("Chunks", len(analytics["chunks"]))
                            with metrics_col2:
                                avg_chunk_size = sum(analytics["chunk_sizes"]) / len(analytics["chunk_sizes"])
                                st.metric("Avg Chunk Size", f"{avg_chunk_size:.1f} chars")
                            with metrics_col3:
                                elapsed = current_time - start_time
                                if elapsed > 0:
                                    chars_per_sec = len(analytics["content"]) / elapsed
                                    st.metric("Speed", f"{chars_per_sec:.1f} chars/s")
                asyncio.run(process_analytics_streaming())
                # Final analytics
                total_time = time.time() - start_time
                total_words = len(analytics["content"].split())
                st.success(f"✅ Analytics complete!")
                # Display final analytics
                st.write("**Final Analytics:**")
                col1, col2, col3, col4 = st.columns(4)
                with col1:
                    st.metric("Total Time", f"{total_time:.2f}s")
                with col2:
                    st.metric("Total Words", total_words)
                with col3:
                    st.metric("Total Chunks", len(analytics["chunks"]))
                with col4:
                    if total_time > 0:
                        st.metric("Words/Second", f"{total_words/total_time:.1f}")
            except Exception as e:
                st.error(f"❌ Analytics streaming error: {e}")
def render_exception_handling(agent, model_choice, temperature, max_turns):
    """Render the exception handling demo.

    Left column deliberately provokes MaxTurnsExceeded; right column shows
    the full catch hierarchy from most specific to most general.
    """
    st.header("⚠️ Exception Handling Demo")
    st.markdown("Demonstrates proper exception handling for different SDK error scenarios.")
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("🚫 MaxTurns Exception")
        st.caption("Trigger MaxTurnsExceeded exception")
        with st.form("maxturns_form"):
            max_turns_test = st.number_input("Max Turns (set low to trigger)", 1, 5, 2)
            maxturns_input = st.text_area(
                "Your message:",
                value="Keep asking me questions and I'll keep responding. Let's have a long conversation."
            )
            maxturns_submitted = st.form_submit_button("Test MaxTurns")
        if maxturns_submitted and maxturns_input:
            try:
                # NOTE(review): confirm RunConfig accepts max_turns — in current
                # SDK versions max_turns is usually passed to Runner.run directly.
                run_config = RunConfig(max_turns=max_turns_test)
                result = asyncio.run(Runner.run(agent, maxturns_input, run_config=run_config))
                st.success("✅ Completed without hitting max turns")
                st.write(f"**Response:** {result.final_output}")
            except MaxTurnsExceeded as e:
                st.warning(f"⚠️ MaxTurnsExceeded: {e}")
                st.info("This is expected when max_turns is set too low for complex conversations.")
            except Exception as e:
                st.error(f"❌ Unexpected error: {e}")
    with col2:
        st.subheader("🔧 General Exception Handling")
        st.caption("Comprehensive exception handling")
        with st.form("exception_form"):
            exception_input = st.text_area("Your message:", value="Tell me about artificial intelligence")
            exception_submitted = st.form_submit_button("Test Exception Handling")
        if exception_submitted and exception_input:
            # Handlers ordered from most specific to the AgentsException base,
            # then a final generic fallback.
            try:
                with st.spinner("Processing with full exception handling..."):
                    result = asyncio.run(Runner.run(agent, exception_input))
                    st.success("✅ Successfully processed")
                    st.write(f"**Response:** {result.final_output}")
            except MaxTurnsExceeded as e:
                st.warning(f"⚠️ Hit maximum turns limit: {e}")
                st.info("Consider increasing max_turns or simplifying the request.")
            except ModelBehaviorError as e:
                st.error(f"🤖 Model behavior error: {e}")
                st.info("The model produced unexpected output. Try rephrasing your request.")
            except UserError as e:
                st.error(f"👤 User error: {e}")
                st.info("There's an issue with the request. Please check your input.")
            except AgentsException as e:
                st.error(f"🔧 SDK error: {e}")
                st.info("An error occurred within the Agents SDK.")
            except Exception as e:
                st.error(f"❌ Unexpected error: {e}")
                st.info("An unexpected error occurred. Please try again.")
    # Exception handling reference table
    st.divider()
    st.subheader("📚 Exception Handling Reference")
    exception_info = {
        "MaxTurnsExceeded": "Agent hit the maximum conversation turns limit",
        "ModelBehaviorError": "LLM produced malformed or unexpected output",
        "UserError": "Invalid SDK usage or request parameters",
        "AgentsException": "Base exception for all SDK-related errors",
        "InputGuardrailTripwireTriggered": "Input validation failed",
        "OutputGuardrailTripwireTriggered": "Output validation failed"
    }
    for exception, description in exception_info.items():
        st.write(f"**{exception}**: {description}")
# Footer
def render_footer():
    """Render the static summary footer shown under every demo."""
    st.divider()
    st.markdown("""
    ### 🎯 Agent Runner Capabilities Demonstrated
    1. **Execution Methods**: Sync, async, and streaming execution patterns
    2. **Conversation Management**: Manual threading vs automatic sessions
    3. **Run Configuration**: Model settings, tracing, and workflow management
    4. **Streaming Events**: Real-time processing and analytics
    5. **Exception Handling**: Comprehensive error handling patterns
    **Key Benefits:**
    - Flexible execution patterns for different use cases
    - Automatic conversation memory with sessions
    - Advanced configuration for production deployments
    - Real-time streaming for better user experience
    - Robust error handling for production reliability
    """)
# Entry point: render the demo UI, then the reference footer.
if __name__ == "__main__":
    main()
    render_footer()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/agent_runner.py",
"license": "Apache License 2.0",
"lines": 542,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/5_context_management/agent.py | from dataclasses import dataclass
from agents import Agent, RunContextWrapper, Runner, function_tool
@dataclass
class UserInfo:
    """Context object containing user information and session data."""
    name: str  # display name used by the tools for greetings/summaries
    uid: int  # numeric user identifier
    # None is replaced with a fresh empty dict in __post_init__, so each
    # instance gets its own mutable preferences mapping.
    preferences: dict | None = None

    def __post_init__(self):
        # Avoid a shared mutable default across instances
        if self.preferences is None:
            self.preferences = {}
@function_tool
async def fetch_user_profile(wrapper: RunContextWrapper[UserInfo]) -> str:
    """Return a one-line summary of the current user's profile and preferences."""
    profile = wrapper.context
    summary = f"User Profile: {profile.name} (ID: {profile.uid}), Preferences: {profile.preferences}"
    return summary
@function_tool
async def update_user_preference(wrapper: RunContextWrapper[UserInfo], key: str, value: str) -> str:
    """Store a single key/value preference on the user held in the run context."""
    profile = wrapper.context
    profile.preferences[key] = value
    return f"Updated {profile.name}'s preference: {key} = {value}"
@function_tool
async def get_personalized_greeting(wrapper: RunContextWrapper[UserInfo]) -> str:
    """Build a greeting whose tone follows the user's 'greeting_style' preference."""
    profile = wrapper.context
    # Map each known style to its greeting; anything unrecognized (including
    # the 'formal' default) falls through to the formal wording.
    greetings = {
        'casual': f"Hey {profile.name}! What's up?",
        'friendly': f"Hi there, {profile.name}! How can I help you today?",
    }
    style = profile.preferences.get('greeting_style', 'formal')
    return greetings.get(style, f"Good day, {profile.name}. How may I assist you?")
# Create agent with context-aware tools
root_agent = Agent[UserInfo](
name="Context-Aware Assistant",
instructions="""
You are a personalized assistant that uses user context to provide tailored responses.
You have access to:
- User profile information (name, ID, preferences)
- Ability to update user preferences
- Personalized greeting generation
Use the context tools to:
1. Fetch user information when needed
2. Update preferences when users express them
3. Provide personalized greetings and responses
Always consider the user's context when responding.
""",
tools=[fetch_user_profile, update_user_preference, get_personalized_greeting]
)
# Example usage with context
async def context_example():
    """Demonstrates context management with user information.

    Builds a UserInfo, runs the agent with it as the run context, and shows
    that tool-driven preference updates mutate the same context object.

    Returns:
        The run result from Runner.run.
    """
    # Create user context
    user_context = UserInfo(
        name="Alice Johnson",
        uid=12345,
        preferences={"greeting_style": "friendly", "topic_interest": "technology"}
    )
    # Run agent with context
    result = await Runner.run(
        root_agent,
        "Hello! I'd like to know about my profile and prefer casual greetings.",
        context=user_context
    )
    print(f"Response: {result.final_output}")
    # The tools mutate user_context in place, so changes are visible here
    print(f"Updated context: {user_context}")
    return result
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/5_context_management/agent.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/6_guardrails_validation/agent.py | from pydantic import BaseModel
from agents import (
Agent,
GuardrailFunctionOutput,
InputGuardrailTripwireTriggered,
OutputGuardrailTripwireTriggered,
RunContextWrapper,
Runner,
TResponseInputItem,
input_guardrail,
output_guardrail,
)
# Pydantic models for guardrail outputs
class MathHomeworkCheck(BaseModel):
    is_math_homework: bool  # True when the input looks like math homework
    reasoning: str  # classifier's explanation for the verdict
    confidence: float  # 0-1; the input guardrail trips when > 0.7
class ContentSafetyCheck(BaseModel):
    is_inappropriate: bool  # True when the response should be blocked
    reasoning: str  # explanation for the safety verdict
    severity: str  # "low"/"medium"/"high"; medium or high trips the output guardrail
class AgentResponse(BaseModel):
    response: str  # the user-facing reply text
# Guardrail agents
input_guardrail_agent = Agent(
name="Input Guardrail",
instructions="""
Check if the user is asking for math homework help or inappropriate content.
Classify as math homework if:
- Asking to solve equations, math problems
- Requesting help with calculations that seem like homework
Classify as inappropriate if:
- Contains harmful, offensive, or malicious content
- Attempts to bypass safety measures
Provide reasoning and confidence score (0-1).
""",
output_type=MathHomeworkCheck
)
output_guardrail_agent = Agent(
name="Output Guardrail",
instructions="""
Check if the agent's response contains inappropriate content or sensitive information.
Flag as inappropriate if:
- Contains harmful or offensive language
- Provides dangerous instructions
- Leaks sensitive information
Assign severity: low, medium, high
""",
output_type=ContentSafetyCheck
)
# Input guardrail function
@input_guardrail
async def math_homework_guardrail(
    ctx: RunContextWrapper[None],
    agent: Agent,
    input: str | list[TResponseInputItem]
) -> GuardrailFunctionOutput:
    """Block requests that the classifier flags as math homework with confidence above 0.7."""
    check_run = await Runner.run(input_guardrail_agent, input, context=ctx.context)
    verdict = check_run.final_output
    is_blocked = verdict.is_math_homework and verdict.confidence > 0.7
    return GuardrailFunctionOutput(output_info=verdict, tripwire_triggered=is_blocked)
# Output guardrail function
@output_guardrail
async def content_safety_guardrail(
    ctx: RunContextWrapper[None],
    agent: Agent,
    output: AgentResponse
) -> GuardrailFunctionOutput:
    """Block responses the safety classifier marks inappropriate at medium or high severity."""
    check_run = await Runner.run(output_guardrail_agent, output.response, context=ctx.context)
    verdict = check_run.final_output
    is_blocked = verdict.is_inappropriate and verdict.severity in ["medium", "high"]
    return GuardrailFunctionOutput(output_info=verdict, tripwire_triggered=is_blocked)
# Main agent with guardrails
root_agent = Agent(
name="Protected Customer Support Agent",
instructions="""
You are a helpful customer support agent.
You help customers with:
- Product questions and information
- Account issues and support
- General inquiries and guidance
You DO NOT help with:
- Academic homework (especially math)
- Inappropriate or harmful requests
- Sensitive or confidential information
Be helpful but maintain appropriate boundaries.
""",
input_guardrails=[math_homework_guardrail],
output_guardrails=[content_safety_guardrail],
output_type=AgentResponse
)
# Example usage with guardrails
async def guardrails_example():
    """Demonstrates guardrails with various inputs.

    Runs three test prompts through the protected agent and prints whether
    each succeeded or tripped an input/output guardrail.
    """
    test_cases = [
        "How do I reset my password?",  # Should pass
        "Can you solve this equation: 2x + 5 = 15?",  # Should trigger input guardrail
        "What are your product features?",  # Should pass
    ]
    for i, test_input in enumerate(test_cases, 1):
        print(f"\n--- Test Case {i}: {test_input} ---")
        try:
            result = await Runner.run(root_agent, test_input)
            print(f"✅ Success: {result.final_output.response}")
        except InputGuardrailTripwireTriggered as e:
            print(f"🚫 Input Guardrail Triggered: {e}")
        except OutputGuardrailTripwireTriggered as e:
            print(f"⚠️ Output Guardrail Triggered: {e}")
        except Exception as e:
            print(f"❌ Error: {e}")
# Standalone example functions
async def test_input_guardrail():
    """Verify the math-homework input guardrail fires on a calculus request."""
    prompt = "Can you help me solve this calculus problem?"
    try:
        await Runner.run(root_agent, prompt)
    except InputGuardrailTripwireTriggered:
        print("✅ Input guardrail correctly triggered for math homework")
    else:
        print("❌ Guardrail should have triggered")
async def test_valid_request():
    """Exercise a normal support request that should sail through both guardrails."""
    outcome = await Runner.run(root_agent, "I'm having trouble logging into my account. Can you help?")
    print(f"✅ Valid request processed: {outcome.final_output.response}")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/6_guardrails_validation/agent.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_1_basic_sessions/agent.py | from agents import Agent, Runner, SQLiteSession
# Create an agent for session demonstrations
root_agent = Agent(
name="Session Demo Assistant",
instructions="""
You are a helpful assistant that demonstrates session memory.
Remember previous conversation context and reference it when relevant.
Reply concisely but show that you remember previous interactions.
"""
)
# Example 1: In-memory session (temporary)
async def in_memory_session_example():
    """Show automatic conversation memory with an in-memory SQLite session (lost on exit)."""
    # No db-file argument -> purely in-memory storage, discarded when the process ends.
    convo = SQLiteSession("temp_conversation")
    print("=== In-Memory Session Example ===")
    prompts = (
        "My name is Alice and I live in San Francisco.",
        "What city do I live in?",  # answerable only via the session history
    )
    for turn, prompt in enumerate(prompts, 1):
        reply = await Runner.run(root_agent, prompt, session=convo)
        print(f"Turn {turn}: {reply.final_output}")
    return convo
# Example 2: Persistent session (survives restarts)
async def persistent_session_example():
    """Show a file-backed SQLite session whose history survives process restarts."""
    # The second argument is a database file, so history is written to disk.
    convo = SQLiteSession("user_123", "conversation_history.db")
    print("\n=== Persistent Session Example ===")
    reply = await Runner.run(root_agent, "I'm a software developer working on AI projects.", session=convo)
    print(f"First message: {reply.final_output}")
    # The follow-up is answerable only because the session preserved the turn above.
    reply = await Runner.run(root_agent, "What kind of work do I do?", session=convo)
    print(f"Follow-up: {reply.final_output}")
    return convo
# Example 3: Multi-turn conversation (mimicking OpenAI SDK docs example)
async def multi_turn_conversation():
    """Walk through a three-turn conversation where context carries over automatically."""
    convo = SQLiteSession("conversation_123", "conversations.db")
    print("\n=== Multi-Turn Conversation (like SDK docs) ===")
    # (banner, question) pairs mirror the OpenAI SDK documentation example.
    turns = (
        ("🌉 First turn:", "What city is the Golden Gate Bridge in?"),
        ("\n🏛️ Second turn (agent remembers automatically):", "What state is it in?"),
        ("\n👥 Third turn (continuing context):", "What's the population of that state?"),
    )
    for banner, question in turns:
        print(banner)
        reply = await Runner.run(root_agent, question, session=convo)
        print(f"User: {question}")
        print(f"Assistant: {reply.final_output}")
    print("\n💡 Notice how the agent remembers context automatically!")
    print("   Sessions handle conversation history without manual .to_input_list()")
    return convo
# Example 4: Session comparison - with vs without sessions
async def session_comparison():
    """Contrast agent behavior with no session (stateless) vs. with a session (memory)."""
    print("\n=== Session vs No Session Comparison ===")
    # Stateless runs: each call starts from a blank context.
    print("🚫 WITHOUT Sessions (no memory):")
    first = await Runner.run(root_agent, "My name is Alice")
    print(f"Turn 1: {first.final_output}")
    second = await Runner.run(root_agent, "What's my name?")
    print(f"Turn 2: {second.final_output}")
    print("   ↪️ Agent doesn't remember - no session used")
    # Same exchange, this time routed through a shared session.
    print(f"\n✅ WITH Sessions (automatic memory):")
    convo = SQLiteSession("comparison_demo", "comparison.db")
    third = await Runner.run(root_agent, "My name is Alice", session=convo)
    print(f"Turn 1: {third.final_output}")
    fourth = await Runner.run(root_agent, "What's my name?", session=convo)
    print(f"Turn 2: {fourth.final_output}")
    print("   ↪️ Agent remembers - session automatically handles history!")
    return convo
# Main execution function
async def main():
    """Run every basic-session example in sequence, then print a recap."""
    print("🧠 OpenAI Agents SDK - Basic Sessions Examples")
    print("=" * 60)
    for demo in (
        in_memory_session_example,
        persistent_session_example,
        multi_turn_conversation,
        session_comparison,
    ):
        await demo()
    print("\n✅ Basic sessions examples completed!")
    print("Key concepts demonstrated:")
    print("  • In-memory sessions: SQLiteSession('session_id')")
    print("  • Persistent sessions: SQLiteSession('session_id', 'file.db')")
    print("  • Automatic memory: No manual .to_input_list() needed")
    print("  • Session vs no session: Memory comparison")
# Script entry point: spin up a fresh event loop and run the demo suite.
if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_1_basic_sessions/agent.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_2_memory_operations/agent.py | from agents import Agent, Runner, SQLiteSession
# Create agent for memory operations demonstrations
root_agent = Agent(
name="Memory Operations Agent",
instructions="""
You are a helpful assistant demonstrating session memory operations.
Remember previous conversation context and reference it when relevant.
Reply concisely but show understanding of conversation history.
"""
)
# Example 1: Basic memory operations - get_items()
async def basic_memory_operations():
    """Exercise get_items()/add_items() and show how injected items enrich the context."""
    store = SQLiteSession("memory_demo", "operations.db")
    print("=== Basic Memory Operations ===")
    reply = await Runner.run(root_agent, "Hello, my favorite color is blue.", session=store)
    print(f"Agent Response: {reply.final_output}")
    # Pull the full conversation history back out of the session.
    history = await store.get_items()
    print(f"\n📋 Session Memory Inspection (get_items()):")
    print(f"   Total items in session: {len(history)}")
    for idx, entry in enumerate(history, 1):
        text = entry['content']
        snippet = text if len(text) <= 50 else text[:50] + "..."
        print(f"   {idx}. [{entry['role']}]: {snippet}")
    # Inject hand-written turns directly into the session history.
    print(f"\n➕ Adding Custom Items (add_items()):")
    injected = [
        {"role": "user", "content": "I also love hiking and photography."},
        {"role": "assistant", "content": "Wonderful! Blue, hiking, and photography - I'll remember these interests."}
    ]
    await store.add_items(injected)
    enriched = await store.get_items()
    print(f"   Items after manual addition: {len(enriched)} (was {len(history)})")
    reply = await Runner.run(root_agent, "What hobbies do I have?", session=store)
    print(f"\n🤖 Agent with enriched context: {reply.final_output}")
    return store
# Example 2: Using pop_item() for corrections (from OpenAI SDK docs)
async def conversation_corrections():
    """Undo the latest exchange with pop_item() and re-ask a corrected question."""
    store = SQLiteSession("correction_demo", "corrections.db")
    print("\n=== Conversation Corrections with pop_item() ===")
    reply = await Runner.run(root_agent, "What's 2 + 2?", session=store)
    print(f"❓ Original Question: What's 2 + 2?")
    print(f"🤖 Agent Answer: {reply.final_output}")
    print(f"\n📊 Items before correction: {len(await store.get_items())}")
    print(f"\n🔄 Correcting conversation using pop_item()...")
    # pop_item() removes the most recent entry, so the assistant reply goes first...
    last_reply = await store.pop_item()
    if last_reply:
        print(f"   ↩️ Removed assistant response: {last_reply['content'][:50]}...")
    # ...and the user's original question second.
    last_question = await store.pop_item()
    if last_question:
        print(f"   ↩️ Removed user question: {last_question['content']}")
    print(f"📊 Items after corrections: {len(await store.get_items())}")
    reply = await Runner.run(root_agent, "What's 2 + 3?", session=store)
    print(f"\n✅ Corrected Question: What's 2 + 3?")
    print(f"🤖 New Answer: {reply.final_output}")
    return store
# Example 3: clear_session() for session reset (from OpenAI SDK docs)
async def session_management():
    """Build up history, wipe it with clear_session(), and confirm the agent forgets."""
    store = SQLiteSession("management_demo", "management.db")
    print("\n=== Session Management with clear_session() ===")
    print("🏗️ Building conversation history...")
    for fact in (
        "I work as a teacher.",
        "I teach mathematics.",
        "I love solving puzzles.",
    ):
        await Runner.run(root_agent, fact, session=store)
    before = await store.get_items()
    print(f"📊 Session contains {len(before)} items before clearing")
    reply = await Runner.run(root_agent, "What do I do for work?", session=store)
    print(f"🤖 Agent remembers: {reply.final_output}")
    # Wipe every stored turn in one call.
    print(f"\n🧹 Clearing session with clear_session()...")
    await store.clear_session()
    after = await store.get_items()
    print(f"📊 Session contains {len(after)} items after clearing")
    reply = await Runner.run(root_agent, "Do you know anything about me?", session=store)
    print(f"🤖 Fresh conversation (no memory): {reply.final_output}")
    return store
# Example 4: Advanced memory inspection with get_items(limit)
async def memory_inspection():
    """Inspect a longer conversation via get_items(limit=N) plus per-role counts."""
    store = SQLiteSession("inspection_demo", "inspection.db")
    print("\n=== Advanced Memory Inspection ===")
    prompts = (
        "Hello, I'm learning about AI.",
        "What is machine learning?",
        "How does deep learning work?",
        "What's the difference between AI and ML?",
        "Can you explain neural networks?"
    )
    print("🏗️ Building extended conversation...")
    for prompt in prompts:
        await Runner.run(root_agent, prompt, session=store)
    # limit=N returns only the most recent N items (SDK docs behavior).
    print(f"\n🔍 Memory Inspection with get_items(limit=3):")
    tail = await store.get_items(limit=3)
    print(f"   Last 3 items (out of full conversation):")
    for idx, entry in enumerate(tail, 1):
        text = entry['content']
        snippet = text if len(text) <= 60 else text[:60] + "..."
        print(f"      {idx}. [{entry['role']}]: {snippet}")
    full = await store.get_items()
    print(f"\n📊 Full conversation analysis:")
    print(f"   Total items in session: {len(full)}")
    print(f"   Recent items retrieved: {len(tail)}")
    user_count = sum(1 for entry in full if entry['role'] == 'user')
    assistant_count = sum(1 for entry in full if entry['role'] == 'assistant')
    print(f"   User messages: {user_count}")
    print(f"   Assistant responses: {assistant_count}")
    return store
# Main execution function
async def main():
    """Run all memory operations examples in order, then print a recap.

    Fix: the original body did `import asyncio` here but never used it inside
    this coroutine (asyncio.run lives only in the __main__ guard), so the dead
    import has been removed.
    """
    print("🧠 OpenAI Agents SDK - Memory Operations Examples")
    print("=" * 60)
    await basic_memory_operations()
    await conversation_corrections()
    await session_management()
    await memory_inspection()
    print("\n✅ All memory operations examples completed!")
    print("Key operations demonstrated:")
    print("  • get_items() - Retrieve conversation history")
    print("  • add_items() - Manually add conversation items")
    print("  • pop_item() - Remove last item for corrections")
    print("  • clear_session() - Reset conversation history")
    print("  • get_items(limit=N) - Retrieve recent items only")
# Script entry point: spin up a fresh event loop and run the demo suite.
if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_2_memory_operations/agent.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_3_multi_sessions/agent.py | from agents import Agent, Runner, SQLiteSession
# Create agents for multi-session demonstrations
# Two role-specialized agents; the examples below route different sessions
# (per user, per context, or one shared) to each of them.
support_agent = Agent(
    name="Support Agent",
    instructions="You are a customer support representative. Help with account and technical issues."
)
sales_agent = Agent(
    name="Sales Agent",
    instructions="You are a sales representative. Help with product information and purchases."
)
# Example 1: Different users with separate sessions
async def multi_user_sessions():
    """Keep two users' conversations isolated via per-user session IDs in one db file."""
    print("=== Multi-User Sessions ===")
    # Same database file, different session IDs -> fully separate histories.
    alice = SQLiteSession("user_alice", "multi_user.db")
    bob = SQLiteSession("user_bob", "multi_user.db")
    print("Alice's conversation:")
    reply = await Runner.run(support_agent, "I forgot my password", session=alice)
    print(f"Alice: I forgot my password")
    print(f"Support: {reply.final_output}")
    reply = await Runner.run(support_agent, "My email is alice@example.com", session=alice)
    print(f"Alice: My email is alice@example.com")
    print(f"Support: {reply.final_output}")
    print("\nBob's conversation:")
    reply = await Runner.run(support_agent, "My app keeps crashing", session=bob)
    print(f"Bob: My app keeps crashing")
    print(f"Support: {reply.final_output}")
    # Alice's context is intact even though Bob talked in between.
    print("\nAlice continues:")
    reply = await Runner.run(support_agent, "Did you find my account?", session=alice)
    print(f"Alice: Did you find my account?")
    print(f"Support: {reply.final_output}")
    return alice, bob
# Example 2: Different conversation contexts
async def context_based_sessions():
    """Separate sessions per conversation context (support ticket vs. sales inquiry)."""
    print("\n=== Context-Based Sessions ===")
    ticket = SQLiteSession("support_ticket_123", "contexts.db")
    inquiry = SQLiteSession("sales_inquiry_456", "contexts.db")
    print("Support context:")
    reply = await Runner.run(support_agent, "I can't access my premium features", session=ticket)
    print(f"Customer: I can't access my premium features")
    print(f"Support: {reply.final_output}")
    print("\nSales context:")
    reply = await Runner.run(sales_agent, "What premium features do you offer?", session=inquiry)
    print(f"Prospect: What premium features do you offer?")
    print(f"Sales: {reply.final_output}")
    # Resuming the ticket: its history is untouched by the sales exchange.
    print("\nBack to support:")
    reply = await Runner.run(support_agent, "I'm on the premium plan", session=ticket)
    print(f"Customer: I'm on the premium plan")
    print(f"Support: {reply.final_output}")
    return ticket, inquiry
# Example 3: Shared session across different agents
async def shared_session_agents():
    """Hand one customer conversation between agents by sharing a single session."""
    print("\n=== Shared Session Across Agents ===")
    # One session object reused by both agents -> both see the full history.
    handoff = SQLiteSession("customer_handoff", "shared.db")
    exchanges = (
        ("Starting with Sales Agent:", sales_agent,
         "I'm interested in your premium plan but have technical questions.", "Sales"),
        ("\nHandoff to Support Agent:", support_agent,
         "Can you help me understand the technical requirements?", "Support"),
        ("\nBack to Sales Agent:", sales_agent,
         "Thanks for the technical info. How do I upgrade?", "Sales"),
    )
    for banner, agent, message, label in exchanges:
        print(banner)
        reply = await Runner.run(agent, message, session=handoff)
        print(f"Customer: {message}")
        print(f"{label}: {reply.final_output}")
    return handoff
# Example 4: Session organization strategies
async def session_organization():
    """Demonstrate naming strategies: per-day, per-feature, and per-thread sessions."""
    print("\n=== Session Organization Strategies ===")
    # Strategy 1: user id + date stamp -> one session per user per day.
    import datetime
    stamp = datetime.datetime.now().strftime("%Y%m%d")
    daily = SQLiteSession(f"user_123_{stamp}", "daily_sessions.db")
    # Strategy 2: one session per product feature.
    # (The support-feature session is created to illustrate the naming scheme
    # but is not exercised below, matching the original demo.)
    chat = SQLiteSession("chat_feature_user_123", "feature_sessions.db")
    support_feature = SQLiteSession("support_feature_user_123", "feature_sessions.db")
    # Strategy 3: one session per conversation thread.
    thread = SQLiteSession("thread_abc123", "thread_sessions.db")
    print("Daily user session:")
    reply = await Runner.run(support_agent, "Daily check-in", session=daily)
    print(f"Response: {reply.final_output}")
    print("\nFeature-specific chat:")
    reply = await Runner.run(support_agent, "Chat feature question", session=chat)
    print(f"Response: {reply.final_output}")
    print("\nThread-based conversation:")
    reply = await Runner.run(support_agent, "Thread conversation", session=thread)
    print(f"Response: {reply.final_output}")
    return daily, chat, thread
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_3_multi_sessions/agent.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/streamlit_sessions_app.py | import streamlit as st
import asyncio
import os
from datetime import datetime
from agents import Agent, Runner, SQLiteSession
from dotenv import load_dotenv
# Load environment variables
load_dotenv()  # pulls OPENAI_API_KEY etc. from a local .env file, if present
# Page configuration
# Must run before any other Streamlit call in the script.
st.set_page_config(
    page_title="Session Management Demo",
    page_icon="💬",
    layout="wide",
    initial_sidebar_state="expanded"
)
# Initialize agents
@st.cache_resource
def initialize_agents():
    """Build and cache the three demo agents (general assistant, support, sales).

    Cached with st.cache_resource so the agents are constructed once per
    server process, not on every Streamlit rerun.
    """
    assistant = Agent(
        name="Session Demo Assistant",
        instructions="""
        You are a helpful assistant demonstrating session memory capabilities.
        Remember previous conversation context and reference it when relevant.
        Reply concisely but show that you remember previous interactions.
        Be friendly and professional.
        """
    )
    support = Agent(
        name="Support Agent",
        instructions="You are a customer support representative. Help with account and technical issues. Be helpful and solution-oriented."
    )
    sales = Agent(
        name="Sales Agent",
        instructions="You are a sales representative. Help with product information and purchases. Be enthusiastic and informative."
    )
    # Order matters: callers unpack (main_agent, support_agent, sales_agent).
    return assistant, support, sales
# Session management functions
class SessionManager:
    """Cache of SQLiteSession objects keyed by session id.

    The `sessions` dict is part of the public surface (main() iterates its
    keys), so its name is preserved. Methods that receive an unknown id are
    no-ops / return empty results rather than raising.
    """

    def __init__(self):
        # session_id -> SQLiteSession
        self.sessions = {}

    def get_session(self, session_id: str, db_file: str = "demo_sessions.db"):
        """Return the cached session for `session_id`, creating it on first use."""
        cached = self.sessions.get(session_id)
        if cached is None:
            cached = self.sessions[session_id] = SQLiteSession(session_id, db_file)
        return cached

    async def clear_session(self, session_id: str):
        """Wipe a session's stored history and drop it from the cache."""
        if session_id in self.sessions:
            await self.sessions[session_id].clear_session()
            self.sessions.pop(session_id)

    async def get_session_items(self, session_id: str, limit: int = None):
        """Return up to `limit` conversation items, or [] for an unknown session."""
        cached = self.sessions.get(session_id)
        if cached is None:
            return []
        return await cached.get_items(limit=limit)

    async def add_custom_items(self, session_id: str, items: list):
        """Append hand-built conversation items to a known session (no-op otherwise)."""
        cached = self.sessions.get(session_id)
        if cached is not None:
            await cached.add_items(items)

    async def pop_last_item(self, session_id: str):
        """Remove and return the newest item, or None for an unknown session."""
        cached = self.sessions.get(session_id)
        if cached is None:
            return None
        return await cached.pop_item()
# Initialize session manager
# Stored in st.session_state so the manager (and its cached sessions)
# survives Streamlit reruns within one browser session.
if 'session_manager' not in st.session_state:
    st.session_state.session_manager = SessionManager()
# Main UI
def main():
    """Top-level page: sidebar picks a demo, body renders the matching view."""
    st.title("🔄 Session Management Demo")
    st.markdown("**Demonstrates OpenAI Agents SDK session capabilities**")
    # Initialize agents
    main_agent, support_agent, sales_agent = initialize_agents()
    # Sidebar for session configuration
    with st.sidebar:
        st.header("⚙️ Session Configuration")
        demo_type = st.selectbox(
            "Select Demo Type",
            ["Basic Sessions", "Memory Operations", "Multi Sessions"]
        )
        if demo_type == "Basic Sessions":
            # NOTE(review): the selected value is never read below — the radio
            # appears to be display-only; confirm intent or wire it up.
            session_type = st.radio(
                "Session Type",
                ["In-Memory", "Persistent"]
            )
        st.divider()
        # Session controls
        st.subheader("Session Controls")
        if st.button("🗑️ Clear All Sessions"):
            with st.spinner("Clearing sessions..."):
                # Snapshot the keys: clear_session mutates the dict while we iterate.
                for session_id in list(st.session_state.session_manager.sessions.keys()):
                    asyncio.run(st.session_state.session_manager.clear_session(session_id))
                st.success("All sessions cleared!")
                st.rerun()
    # Main content area
    if demo_type == "Basic Sessions":
        render_basic_sessions(main_agent)
    elif demo_type == "Memory Operations":
        render_memory_operations(main_agent)
    elif demo_type == "Multi Sessions":
        render_multi_sessions(support_agent, sales_agent)
def render_basic_sessions(agent):
    """Render the basic sessions demo.

    Two side-by-side panels contrast an "in-memory" manager-cached session
    with an explicitly file-backed one. Each turn blocks the UI on
    asyncio.run().

    Args:
        agent: The agent used for every conversation turn in this demo.
    """
    st.header("📝 Basic Sessions Demo")
    st.markdown("Demonstrates fundamental session memory with automatic conversation history.")
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("💾 In-Memory Session")
        st.caption("Temporary session storage (lost when app restarts)")
        session_id = "in_memory_demo"
        with st.form("in_memory_form"):
            user_input = st.text_input("Your message:", key="in_memory_input")
            submitted = st.form_submit_button("Send Message")
            if submitted and user_input:
                with st.spinner("Processing..."):
                    # NOTE(review): uses the manager's default db file
                    # ("demo_sessions.db"), so storage is a file here too —
                    # "in-memory" only relative to the manager cache; confirm intent.
                    session = st.session_state.session_manager.get_session(session_id)
                    result = asyncio.run(Runner.run(agent, user_input, session=session))
                    st.success("Message sent!")
                    st.write(f"**Assistant:** {result.final_output}")
        # Show conversation history
        if st.button("📋 Show Conversation", key="show_in_memory"):
            items = asyncio.run(st.session_state.session_manager.get_session_items(session_id))
            if items:
                st.write("**Conversation History:**")
                for i, item in enumerate(items, 1):
                    role_emoji = "👤" if item['role'] == 'user' else "🤖"
                    st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
            else:
                # Also hit when no message has been sent yet this process
                # (get_session_items returns [] for unknown ids).
                st.info("No conversation history yet.")
    with col2:
        st.subheader("💽 Persistent Session")
        st.caption("File-based storage (survives app restarts)")
        session_id = "persistent_demo"
        with st.form("persistent_form"):
            user_input = st.text_input("Your message:", key="persistent_input")
            submitted = st.form_submit_button("Send Message")
            if submitted and user_input:
                with st.spinner("Processing..."):
                    session = st.session_state.session_manager.get_session(session_id, "persistent_demo.db")
                    result = asyncio.run(Runner.run(agent, user_input, session=session))
                    st.success("Message sent!")
                    st.write(f"**Assistant:** {result.final_output}")
        # Show conversation history
        if st.button("📋 Show Conversation", key="show_persistent"):
            items = asyncio.run(st.session_state.session_manager.get_session_items(session_id))
            if items:
                st.write("**Conversation History:**")
                for i, item in enumerate(items, 1):
                    role_emoji = "👤" if item['role'] == 'user' else "🤖"
                    st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
            else:
                st.info("No conversation history yet.")
def render_memory_operations(agent):
    """Render the memory operations demo.

    One shared session id ("memory_operations_demo") backs the conversation
    form and all of the inspection/manipulation controls below it.

    Args:
        agent: The agent used for conversation turns in this demo.
    """
    st.header("🧠 Memory Operations Demo")
    st.markdown("Demonstrates advanced session memory operations including item manipulation and corrections.")
    session_id = "memory_operations_demo"
    # Main conversation area
    st.subheader("💬 Conversation")
    with st.form("memory_conversation"):
        user_input = st.text_input("Your message:")
        submitted = st.form_submit_button("Send Message")
        if submitted and user_input:
            with st.spinner("Processing..."):
                session = st.session_state.session_manager.get_session(session_id)
                result = asyncio.run(Runner.run(agent, user_input, session=session))
                st.success("Message sent!")
                st.write(f"**Assistant:** {result.final_output}")
    # Memory operations
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("📊 Memory Inspection")
        if st.button("🔍 Get All Items"):
            # Returns [] if no message has been sent yet in this process.
            items = asyncio.run(st.session_state.session_manager.get_session_items(session_id))
            if items:
                st.write(f"**Total items:** {len(items)}")
                for i, item in enumerate(items, 1):
                    role_emoji = "👤" if item['role'] == 'user' else "🤖"
                    content_preview = item['content'][:100] + "..." if len(item['content']) > 100 else item['content']
                    st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {content_preview}")
            else:
                st.info("No items in session yet.")
        # Get limited items
        limit = st.number_input("Get last N items:", min_value=1, max_value=20, value=3)
        if st.button("📋 Get Recent Items"):
            items = asyncio.run(st.session_state.session_manager.get_session_items(session_id, limit=limit))
            if items:
                st.write(f"**Last {len(items)} items:**")
                for i, item in enumerate(items, 1):
                    role_emoji = "👤" if item['role'] == 'user' else "🤖"
                    st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
            else:
                st.info("No items to show.")
    with col2:
        st.subheader("✏️ Memory Manipulation")
        # Add custom items
        st.write("**Add Custom Items:**")
        with st.form("add_items_form"):
            user_content = st.text_area("User message to add:")
            assistant_content = st.text_area("Assistant response to add:")
            add_submitted = st.form_submit_button("➕ Add Items")
            if add_submitted and user_content and assistant_content:
                # Injected as a user/assistant pair, mirroring a real exchange.
                custom_items = [
                    {"role": "user", "content": user_content},
                    {"role": "assistant", "content": assistant_content}
                ]
                asyncio.run(st.session_state.session_manager.add_custom_items(session_id, custom_items))
                st.success("Custom items added!")
        # Pop last item (correction)
        if st.button("↶ Undo Last Response"):
            popped_item = asyncio.run(st.session_state.session_manager.pop_last_item(session_id))
            if popped_item:
                st.success(f"Removed: {popped_item['role']} - {popped_item['content'][:50]}...")
            else:
                st.warning("No items to remove.")
        # Clear session
        if st.button("🗑️ Clear Session"):
            asyncio.run(st.session_state.session_manager.clear_session(session_id))
            st.success("Session cleared!")
def render_multi_sessions(support_agent, sales_agent):
    """Render the multi-sessions demo.

    Three tabs: per-user isolation, per-context isolation, and a handoff
    scenario where both agents share one session.

    Args:
        support_agent: Agent used for support-style conversations.
        sales_agent: Agent used for sales-style conversations.
    """
    st.header("👥 Multi Sessions Demo")
    st.markdown("Demonstrates managing multiple conversations and different agent contexts.")
    tab1, tab2, tab3 = st.tabs(["👤 Multi-User", "🏢 Context-Based", "🔄 Agent Handoff"])
    with tab1:
        st.subheader("Different Users, Separate Sessions")
        col1, col2 = st.columns(2)
        with col1:
            st.write("**👩 Alice's Session**")
            alice_session_id = "user_alice"
            with st.form("alice_form"):
                alice_input = st.text_input("Alice's message:", key="alice_input")
                alice_submitted = st.form_submit_button("Send as Alice")
                if alice_submitted and alice_input:
                    with st.spinner("Processing Alice's message..."):
                        # Same db file as Bob, different session id -> isolated history.
                        session = st.session_state.session_manager.get_session(alice_session_id, "multi_user.db")
                        result = asyncio.run(Runner.run(support_agent, alice_input, session=session))
                        st.write(f"**Support:** {result.final_output}")
            if st.button("📋 Alice's History", key="alice_history"):
                items = asyncio.run(st.session_state.session_manager.get_session_items(alice_session_id))
                for item in items:
                    role_emoji = "👩" if item['role'] == 'user' else "🛠️"
                    st.write(f"{role_emoji} **{item['role'].title()}:** {item['content']}")
        with col2:
            st.write("**👨 Bob's Session**")
            bob_session_id = "user_bob"
            with st.form("bob_form"):
                bob_input = st.text_input("Bob's message:", key="bob_input")
                bob_submitted = st.form_submit_button("Send as Bob")
                if bob_submitted and bob_input:
                    with st.spinner("Processing Bob's message..."):
                        session = st.session_state.session_manager.get_session(bob_session_id, "multi_user.db")
                        result = asyncio.run(Runner.run(support_agent, bob_input, session=session))
                        st.write(f"**Support:** {result.final_output}")
            if st.button("📋 Bob's History", key="bob_history"):
                items = asyncio.run(st.session_state.session_manager.get_session_items(bob_session_id))
                for item in items:
                    role_emoji = "👨" if item['role'] == 'user' else "🛠️"
                    st.write(f"{role_emoji} **{item['role'].title()}:** {item['content']}")
    with tab2:
        st.subheader("Different Contexts, Different Sessions")
        col1, col2 = st.columns(2)
        with col1:
            st.write("**🛠️ Support Context**")
            support_session_id = "support_context"
            with st.form("support_context_form"):
                support_input = st.text_input("Support question:", key="support_context_input")
                support_submitted = st.form_submit_button("Ask Support")
                if support_submitted and support_input:
                    with st.spinner("Processing support question..."):
                        session = st.session_state.session_manager.get_session(support_session_id, "contexts.db")
                        result = asyncio.run(Runner.run(support_agent, support_input, session=session))
                        st.write(f"**Support:** {result.final_output}")
        with col2:
            st.write("**💰 Sales Context**")
            sales_session_id = "sales_context"
            with st.form("sales_context_form"):
                sales_input = st.text_input("Sales inquiry:", key="sales_context_input")
                sales_submitted = st.form_submit_button("Ask Sales")
                if sales_submitted and sales_input:
                    with st.spinner("Processing sales inquiry..."):
                        session = st.session_state.session_manager.get_session(sales_session_id, "contexts.db")
                        result = asyncio.run(Runner.run(sales_agent, sales_input, session=session))
                        st.write(f"**Sales:** {result.final_output}")
    with tab3:
        st.subheader("Shared Session Across Different Agents")
        st.caption("Customer handoff scenario - same conversation, different agents")
        shared_session_id = "customer_handoff"
        # Agent selector
        selected_agent = st.radio(
            "Select Agent:",
            ["Sales Agent", "Support Agent"],
            horizontal=True
        )
        agent = sales_agent if selected_agent == "Sales Agent" else support_agent
        with st.form("handoff_form"):
            handoff_input = st.text_input("Customer message:")
            handoff_submitted = st.form_submit_button(f"Send to {selected_agent}")
            if handoff_submitted and handoff_input:
                with st.spinner(f"Processing with {selected_agent}..."):
                    # One shared session id: both agents read/write the same history.
                    session = st.session_state.session_manager.get_session(shared_session_id, "shared.db")
                    result = asyncio.run(Runner.run(agent, handoff_input, session=session))
                    st.write(f"**{selected_agent}:** {result.final_output}")
        # Show shared conversation history
        if st.button("📋 Show Shared Conversation"):
            items = asyncio.run(st.session_state.session_manager.get_session_items(shared_session_id))
            if items:
                st.write("**Shared Conversation History:**")
                for i, item in enumerate(items, 1):
                    if item['role'] == 'user':
                        st.write(f"{i}. 👤 **Customer:** {item['content']}")
                    else:
                        # Try to determine which agent responded based on content
                        # NOTE(review): keyword sniffing is a best-effort display
                        # heuristic only — the session does not record which agent spoke.
                        agent_emoji = "💰" if "sales" in item['content'].lower() or "price" in item['content'].lower() else "🛠️"
                        st.write(f"{i}. {agent_emoji} **Agent:** {item['content']}")
            else:
                st.info("No conversation history yet.")
# Footer
def render_footer():
    """Render the closing summary section at the bottom of the Streamlit page."""
    summary_md = """
    ### 🎯 Session Capabilities Demonstrated
    1. **Basic Sessions**: In-memory vs persistent storage
    2. **Memory Operations**: get_items(), add_items(), pop_item(), clear_session()
    3. **Multi Sessions**: Multiple users, contexts, and agent handoffs
    **Key Benefits:**
    - Automatic conversation history management
    - Flexible session organization strategies
    - Memory manipulation for corrections and custom flows
    - Multi-agent conversation support
    """
    st.divider()
    st.markdown(summary_md)
# Script entry point: run the interactive demo, then append the footer summary.
if __name__ == "__main__":
    main()
    render_footer()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/streamlit_sessions_app.py",
"license": "Apache License 2.0",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_1_basic_handoffs/agent.py | from agents import Agent, Runner, handoff
from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX
import asyncio
# Create specialized agents
billing_agent = Agent(
name="Billing Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a billing specialist. Help customers with:
- Payment issues and billing questions
- Subscription management and upgrades
- Invoice and receipt requests
- Refund processing
Be helpful and provide specific billing assistance.
"""
)
technical_agent = Agent(
name="Technical Support Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a technical support specialist. Help customers with:
- App crashes and technical issues
- Account access problems
- Feature usage and troubleshooting
- Bug reports and technical questions
Provide clear technical guidance and solutions.
"""
)
# Create triage agent with handoffs
root_agent = Agent(
name="Customer Service Triage Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a customer service triage agent. Your job is to:
1. Understand the customer's issue
2. Determine which specialist can best help them
3. Transfer them to the appropriate agent using handoff tools
Available specialists:
- Billing Agent: For payment, subscription, billing, and refund issues
- Technical Support Agent: For app problems, technical issues, and troubleshooting
If the issue is clearly billing-related, transfer to Billing Agent.
If the issue is clearly technical, transfer to Technical Support Agent.
If you can handle it yourself (general questions), do so.
""",
handoffs=[billing_agent, technical_agent] # Creates handoff tools automatically
)
# Example usage
async def main():
    """Exercise the triage agent on one billing and one technical request."""
    print("🤝 OpenAI Agents SDK - Basic Handoffs")
    print("=" * 50)
    # (header printed before the run, customer message passed to the triage agent)
    scenarios = [
        ("=== Billing Handoff Example ===",
         "Hi, I was charged twice for my subscription this month. Can you help me get a refund?"),
        ("\n=== Technical Support Handoff Example ===",
         "My app keeps crashing when I try to upload photos. This has been happening for 3 days."),
    ]
    for header, customer_message in scenarios:
        print(header)
        outcome = await Runner.run(root_agent, customer_message)
        print(f"Response: {outcome.final_output}")
    print("\n✅ Basic handoffs tutorial complete!")
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_1_basic_handoffs/agent.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_2_advanced_handoffs/agent.py | from agents import Agent, Runner, handoff, RunContextWrapper
from agents.extensions import handoff_filters
from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX
from pydantic import BaseModel
import asyncio
# Define structured input for escalation handoff
class EscalationData(BaseModel):
    """Structured payload the triage agent must supply when escalating."""
    reason: str  # Why the issue is being escalated
    priority: str  # Per triage instructions below: low / medium / high
    customer_id: str  # Customer identifier, e.g. "CUST-789123"
# Create specialized agents
escalation_agent = Agent(
name="Escalation Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You handle escalated customer issues. You have access to additional tools and authority
to resolve complex problems that first-level support cannot handle.
"""
)
# Callback function for escalation tracking
async def on_escalation_handoff(ctx: RunContextWrapper[None], input_data: EscalationData):
    """Side-effect-only callback: log an alert whenever the escalation handoff fires."""
    print(f"🚨 ESCALATION ALERT:")
    for label, value in (
        ("Reason", input_data.reason),
        ("Priority", input_data.priority),
        ("Customer ID", input_data.customer_id),
    ):
        print(f"   {label}: {value}")
# Create advanced handoff with custom configuration
escalation_handoff = handoff(
agent=escalation_agent,
tool_name_override="escalate_to_manager",
tool_description_override="Escalate complex issues that require manager intervention",
on_handoff=on_escalation_handoff,
input_type=EscalationData # Structured input required
)
# Advanced triage agent
root_agent = Agent(
name="Advanced Triage Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are an advanced customer service agent with escalation capabilities.
Handle most issues yourself, but use escalations for:
- Angry customers or complex complaints
- Issues requiring refunds > $100
- Technical problems you cannot resolve
When escalating, provide reason, priority (low/medium/high), and customer_id.
""",
handoffs=[escalation_handoff]
)
# Example usage
async def main():
    """Demo: a high-value, angry complaint that should trigger the escalation handoff."""
    print("⚡ OpenAI Agents SDK - Advanced Handoffs")
    print("=" * 50)
    # Test escalation with structured input
    print("=== Escalation with Structured Input ===")
    result = await Runner.run(
        root_agent,
        """I am absolutely furious! Your service has been down for 3 days and I've lost thousands
        of dollars in business. I want a full refund of my annual subscription ($299) and
        compensation for my losses. My customer ID is CUST-789123."""
    )
    print(f"Response: {result.final_output}")
    print("\n✅ Advanced handoffs tutorial complete!")
# Script entry point.
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_2_advanced_handoffs/agent.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_1_parallel_execution/agent.py | import asyncio
from agents import Agent, ItemHelpers, Runner, trace
# Create specialized translation agent
spanish_agent = Agent(
name="Spanish Translator",
instructions="You translate the user's message to Spanish. Provide natural, fluent translations."
)
# Create translation quality picker
translation_picker = Agent(
name="Translation Quality Picker",
instructions="""
You are an expert in Spanish translations.
Given multiple Spanish translation options, pick the most natural, accurate, and fluent one.
Explain briefly why you chose that translation.
"""
)
# Example 1: Basic parallel execution with quality selection
async def parallel_translation_example():
    """Demonstrates running the same agent multiple times in parallel for quality"""
    print("=== Parallel Translation with Quality Selection ===")
    msg = "Hello, how are you today? I hope you're having a wonderful time!"
    print(f"Original message: {msg}")
    # Keep the whole fan-out/pick workflow under one trace for observability.
    with trace("Parallel Translation Workflow") as workflow_trace:
        print("Running 3 parallel translation attempts...")
        # Same agent, same prompt, three concurrent attempts.
        attempts = await asyncio.gather(*(Runner.run(spanish_agent, msg) for _ in range(3)))
        candidate_texts = [ItemHelpers.text_message_outputs(a.new_items) for a in attempts]
        translations = "\n\n".join(
            f"Translation {i+1}: {output}" for i, output in enumerate(candidate_texts)
        )
        print(f"\nAll translations:\n{translations}")
        # A second agent judges the three candidates and picks one.
        best_translation = await Runner.run(
            translation_picker,
            f"Original English: {msg}\n\nTranslations to choose from:\n{translations}"
        )
        print(f"\nBest translation selected: {best_translation.final_output}")
        print(f"Workflow trace ID: {workflow_trace.trace_id}")
    return best_translation
# Example 2: Parallel execution with different specialized agents
async def parallel_specialized_agents():
    """Shows parallel execution with different agents for diverse perspectives"""
    print("\n=== Parallel Execution with Specialized Agents ===")
    # Create different specialized agents — same task, three different registers.
    formal_translator = Agent(
        name="Formal Spanish Translator",
        instructions="Translate to formal, polite Spanish using 'usted' forms."
    )
    casual_translator = Agent(
        name="Casual Spanish Translator",
        instructions="Translate to casual, friendly Spanish using 'tú' forms."
    )
    regional_translator = Agent(
        name="Mexican Spanish Translator",
        instructions="Translate to Mexican Spanish with regional expressions and vocabulary."
    )
    msg = "Hey friend, want to grab some coffee later?"
    print(f"Original message: {msg}")
    with trace("Multi-Style Translation") as style_trace:
        print("Running parallel translations with different styles...")
        # Run different translation styles in parallel (one per agent).
        formal_result, casual_result, regional_result = await asyncio.gather(
            Runner.run(formal_translator, msg),
            Runner.run(casual_translator, msg),
            Runner.run(regional_translator, msg)
        )
        # Extract and display all results
        formal_text = ItemHelpers.text_message_outputs(formal_result.new_items)
        casual_text = ItemHelpers.text_message_outputs(casual_result.new_items)
        regional_text = ItemHelpers.text_message_outputs(regional_result.new_items)
        print(f"\nFormal style: {formal_text}")
        print(f"Casual style: {casual_text}")
        print(f"Regional style: {regional_text}")
        # Let user choose preferred style
        style_comparison = f"""
        Original: {msg}
        Formal Spanish: {formal_text}
        Casual Spanish: {casual_text}
        Mexican Spanish: {regional_text}
        """
        # The quality-picker agent recommends which register fits the context.
        style_recommendation = await Runner.run(
            translation_picker,
            f"Compare these translation styles and recommend which is most appropriate for the context: {style_comparison}"
        )
        print(f"\nStyle recommendation: {style_recommendation.final_output}")
        print(f"Multi-style trace ID: {style_trace.trace_id}")
    return style_recommendation
# Example 3: Parallel execution for content generation diversity
async def parallel_content_generation():
    """Demonstrates parallel content generation for creative diversity"""
    print("\n=== Parallel Content Generation for Diversity ===")
    # Create content generation agents with different approaches (tone/intent).
    creative_agent = Agent(
        name="Creative Writer",
        instructions="Write creative, engaging content with vivid imagery and storytelling."
    )
    informative_agent = Agent(
        name="Informative Writer",
        instructions="Write clear, factual, informative content focused on key information."
    )
    persuasive_agent = Agent(
        name="Persuasive Writer",
        instructions="Write compelling, persuasive content that motivates action."
    )
    topic = "The benefits of learning a new language"
    print(f"Content topic: {topic}")
    with trace("Diverse Content Generation") as content_trace:
        print("Generating content with different writing styles in parallel...")
        # Generate different content approaches simultaneously (same prompt, three writers).
        creative_result, informative_result, persuasive_result = await asyncio.gather(
            Runner.run(creative_agent, f"Write a short paragraph about: {topic}"),
            Runner.run(informative_agent, f"Write a short paragraph about: {topic}"),
            Runner.run(persuasive_agent, f"Write a short paragraph about: {topic}")
        )
        # Extract content
        creative_content = ItemHelpers.text_message_outputs(creative_result.new_items)
        informative_content = ItemHelpers.text_message_outputs(informative_result.new_items)
        persuasive_content = ItemHelpers.text_message_outputs(persuasive_result.new_items)
        print(f"\nCreative approach:\n{creative_content}")
        print(f"\nInformative approach:\n{informative_content}")
        print(f"\nPersuasive approach:\n{persuasive_content}")
        # Synthesize best elements from all approaches with a fourth agent.
        synthesis_agent = Agent(
            name="Content Synthesizer",
            instructions="Combine the best elements from multiple content pieces into one cohesive, high-quality paragraph."
        )
        combined_content = f"""
        Topic: {topic}
        Creative version: {creative_content}
        Informative version: {informative_content}
        Persuasive version: {persuasive_content}
        """
        synthesized_result = await Runner.run(
            synthesis_agent,
            f"Create the best possible paragraph by combining elements from these approaches: {combined_content}"
        )
        print(f"\nSynthesized content: {synthesized_result.final_output}")
        print(f"Content generation trace ID: {content_trace.trace_id}")
    return synthesized_result
# Main execution
async def main():
    """Run every parallel-execution demo in sequence."""
    print("🎼 OpenAI Agents SDK - Parallel Multi-Agent Execution")
    print("=" * 60)
    demos = (
        parallel_translation_example,
        parallel_specialized_agents,
        parallel_content_generation,
    )
    for demo in demos:
        await demo()
    print("\n✅ Parallel execution tutorial complete!")
    print("Parallel execution enables quality improvement through diversity and selection")
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_1_parallel_execution/agent.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_2_agents_as_tools/agent.py | from agents import Agent, Runner, function_tool
import asyncio
# Define specialized research agent
research_agent = Agent(
name="Research Specialist",
instructions="""
You are a research specialist. Provide detailed, well-researched information
on any topic with proper analysis and insights. Focus on factual accuracy
and comprehensive coverage.
"""
)
# Define specialized writing agent
writing_agent = Agent(
name="Writing Specialist",
instructions="""
You are a professional writer. Take research information and create
well-structured, engaging content with proper formatting and flow.
Make content accessible and compelling for readers.
"""
)
# Define editing agent
editing_agent = Agent(
name="Editing Specialist",
instructions="""
You are a professional editor. Review written content for:
- Grammar and spelling errors
- Clarity and readability
- Structure and flow
- Consistency and tone
Provide the improved version of the content.
"""
)
# Create function tools from agents
@function_tool
async def research_tool(topic: str) -> str:
    """Research a topic using the specialized research agent with custom configuration"""
    research_prompt = f"Research this topic thoroughly and provide key insights: {topic}"
    # max_turns=3 gives the researcher room for deeper, multi-step work.
    outcome = await Runner.run(research_agent, input=research_prompt, max_turns=3)
    return str(outcome.final_output)
@function_tool
async def writing_tool(content: str, style: str = "professional") -> str:
    """Transform content using the specialized writing agent with custom style"""
    # Cap at two turns — drafting needs less iteration than research.
    draft = await Runner.run(
        writing_agent,
        input=f"Write engaging {style} content based on this research: {content}",
        max_turns=2
    )
    return str(draft.final_output)
@function_tool
async def editing_tool(content: str) -> str:
    """Edit and improve content using the specialized editing agent"""
    # No max_turns override: a single editing pass suffices.
    edit_prompt = f"Edit and improve this content for clarity, grammar, and engagement: {content}"
    polished = await Runner.run(editing_agent, input=edit_prompt)
    return str(polished.final_output)
# Create orchestrator agent that uses other agents as tools
content_orchestrator = Agent(
name="Content Creation Orchestrator",
instructions="""
You are a content creation orchestrator that coordinates research, writing, and editing.
You have access to:
- research_tool: For in-depth topic research and insights
- writing_tool: For professional content creation (specify style: professional, casual, academic, etc.)
- editing_tool: For content review and improvement
When users request content:
1. First use research_tool to gather comprehensive information
2. Then use writing_tool to create well-structured content
3. Finally use editing_tool to polish and improve the final piece
Coordinate all three tools to create high-quality, well-researched content.
""",
tools=[research_tool, writing_tool, editing_tool]
)
# Example 1: Basic content creation workflow
async def basic_content_workflow():
    """Demonstrates basic orchestration using agents as tools"""
    print("=== Basic Content Creation Workflow ===")
    # The orchestrator decides internally when to call research/writing/editing.
    result = await Runner.run(
        content_orchestrator,
        """Create a comprehensive article about the benefits of renewable energy.
        I need it to be professional and well-researched, suitable for a business audience."""
    )
    print(f"Final article: {result.final_output}")
    return result
# Example 2: Custom workflow with specific requirements
async def custom_workflow_example():
    """Shows orchestrator handling specific workflow requirements"""
    print("\n=== Custom Workflow with Specific Requirements ===")
    # Constraints are expressed purely in the prompt; the orchestrator's
    # instructions tell it how to map them onto tool calls.
    result = await Runner.run(
        content_orchestrator,
        """I need content about artificial intelligence in healthcare for a technical blog.
        Make sure to:
        1. Research current AI applications in medical diagnosis
        2. Write in an accessible but technical style
        3. Include both benefits and challenges
        4. Keep it under 500 words
        Please go through the full research -> write -> edit process."""
    )
    print(f"Technical blog post: {result.final_output}")
    return result
# Example 3: Comparison with direct agent orchestration
async def direct_orchestration_comparison():
    """Compares agents-as-tools vs direct orchestration"""
    print("\n=== Direct Orchestration (Manual) ===")
    topic = "The future of remote work"
    # Manual pipeline: each stage's output is spliced into the next prompt.
    print("Step 1: Research...")
    researched = await Runner.run(
        research_agent,
        f"Research trends and predictions about: {topic}"
    )
    print("Step 2: Writing...")
    written = await Runner.run(
        writing_agent,
        f"Write a professional article based on this research: {researched.final_output}"
    )
    print("Step 3: Editing...")
    manual_article = await Runner.run(
        editing_agent,
        f"Edit and improve this article: {written.final_output}"
    )
    print(f"Manual orchestration result: {manual_article.final_output}")
    print("\n=== Agents-as-Tools Orchestration (Automatic) ===")
    # Same pipeline, but the orchestrator agent sequences the tools itself.
    auto_article = await Runner.run(
        content_orchestrator,
        f"Create a professional article about: {topic}. Go through research, writing, and editing."
    )
    print(f"Automatic orchestration result: {auto_article.final_output}")
    return manual_article, auto_article
# Example 4: Advanced orchestrator with conditional logic
async def advanced_orchestrator_example():
    """Shows more sophisticated orchestration logic"""
    print("\n=== Advanced Orchestrator with Conditional Logic ===")
    # Create advanced orchestrator with conditional workflows — the branching
    # lives entirely in the instructions, not in Python code.
    advanced_orchestrator = Agent(
        name="Advanced Content Orchestrator",
        instructions="""
        You are an intelligent content orchestrator that adapts workflows based on requirements.
        Available tools:
        - research_tool: For topic research
        - writing_tool: For content creation (styles: professional, casual, academic, creative)
        - editing_tool: For content improvement
        Workflow decisions:
        - For complex/technical topics: Do extra research first
        - For creative content: Use creative writing style
        - For short content: Skip detailed research
        - For business content: Always edit for professionalism
        - Always explain your workflow decisions
        Adapt your approach based on the specific request.
        """,
        tools=[research_tool, writing_tool, editing_tool]
    )
    # Test with different content types (short/social, technical, creative).
    requests = [
        "Write a quick social media post about coffee benefits",
        "Create a detailed technical whitepaper on blockchain security",
        "Write a creative story about a robot learning to paint"
    ]
    for i, request in enumerate(requests, 1):
        print(f"\nRequest {i}: {request}")
        result = await Runner.run(advanced_orchestrator, request)
        print(f"Result: {result.final_output}")
        print("-" * 50)
    # Returns the prompt list (not the run results); callers ignore it.
    return requests
# Main execution
async def main():
    """Run all agents-as-tools demos in order."""
    print("🔧 OpenAI Agents SDK - Agents as Tools Orchestration")
    print("=" * 60)
    for demo in (
        basic_content_workflow,
        custom_workflow_example,
        direct_orchestration_comparison,
        advanced_orchestrator_example,
    ):
        await demo()
    print("\n✅ Agents as tools tutorial complete!")
    print("Agents as tools enable sophisticated workflow orchestration with intelligent coordination")
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_2_agents_as_tools/agent.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/single_agent_apps/ai_recipe_meal_planning_agent/ai_recipe_meal_planning_agent.py | import asyncio
import os
import streamlit as st
import random
from textwrap import dedent
from typing import Dict, List, Optional
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.models.openai import OpenAIChat
from agno.tools import tool
import requests
from dotenv import load_dotenv
from agno.tools.duckduckgo import DuckDuckGoTools
load_dotenv()
SPOONACULAR_API_KEY = os.getenv("SPOONACULAR_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
@tool
def search_recipes(ingredients: str, diet_type: Optional[str] = None) -> Dict:
    """Search for detailed recipes with cooking instructions."""
    # Tool contract: always return a dict; failures are reported via an
    # "error" key rather than raised, so the agent can relay them to the user.
    if not SPOONACULAR_API_KEY:
        return {"error": "Spoonacular API key not found"}
    url = "https://api.spoonacular.com/recipes/findByIngredients"
    params = {
        "apiKey": SPOONACULAR_API_KEY,
        "ingredients": ingredients,
        "number": 5,
        "ranking": 2,  # Spoonacular ranking mode — presumably "minimize missing ingredients"; see API docs
        "ignorePantry": True
    }
    if diet_type:
        params["diet"] = diet_type
    try:
        response = requests.get(url, params=params, timeout=15)
        response.raise_for_status()
        recipes = response.json()
        detailed_recipes = []
        # Fetch full details for the top 3 matches only, limiting API quota usage.
        for recipe in recipes[:3]:
            detail_url = f"https://api.spoonacular.com/recipes/{recipe['id']}/information"
            detail_response = requests.get(detail_url, params={"apiKey": SPOONACULAR_API_KEY}, timeout=10)
            if detail_response.status_code != 200:
                continue  # Skip recipes whose detail lookup fails; keep the rest.
            detail_data = detail_response.json()
            detailed_recipes.append({
                "id": recipe['id'],
                "title": recipe['title'],
                "ready_in_minutes": detail_data.get('readyInMinutes', 'N/A'),
                "servings": detail_data.get('servings', 'N/A'),
                "health_score": detail_data.get('healthScore', 0),
                "used_ingredients": [i['name'] for i in recipe['usedIngredients']],
                "missing_ingredients": [i['name'] for i in recipe['missedIngredients']],
                "instructions": detail_data.get('instructions', 'Instructions not available')
            })
        return {
            "recipes": detailed_recipes,
            "total_found": len(recipes)
        }
    # Fix: was a bare `except:` that also swallowed KeyboardInterrupt/SystemExit
    # and discarded the failure reason. Catch only expected failures (network/HTTP,
    # JSON decoding, unexpected payload shape) and surface the cause.
    except (requests.RequestException, ValueError, KeyError, TypeError) as exc:
        return {"error": f"Recipe search failed: {exc}"}
@tool
def analyze_nutrition(recipe_name: str) -> Dict:
    """Get nutrition analysis for a recipe by searching for it."""
    # Errors are returned as {"error": ...} instead of raised so the agent
    # can explain the failure to the user.
    if not SPOONACULAR_API_KEY:
        return {"error": "API key not found"}
    # First search for the recipe
    search_url = "https://api.spoonacular.com/recipes/complexSearch"
    search_params = {
        "apiKey": SPOONACULAR_API_KEY,
        "query": recipe_name,
        "number": 1,  # Only the single best match is analyzed.
        "addRecipeInformation": True,
        "addRecipeNutrition": True
    }
    try:
        search_response = requests.get(search_url, params=search_params, timeout=15)
        search_response.raise_for_status()
        search_data = search_response.json()
        if not search_data.get('results'):
            return {"error": f"No recipe found for '{recipe_name}'"}
        recipe = search_data['results'][0]
        if 'nutrition' not in recipe:
            return {"error": "No nutrition data available for this recipe"}
        # Flatten the nutrient list into name -> amount for easy lookup.
        nutrients = {n['name']: n['amount'] for n in recipe['nutrition']['nutrients']}
        calories = round(nutrients.get('Calories', 0))
        protein = round(nutrients.get('Protein', 0), 1)
        carbs = round(nutrients.get('Carbohydrates', 0), 1)
        fat = round(nutrients.get('Fat', 0), 1)
        fiber = round(nutrients.get('Fiber', 0), 1)
        sodium = round(nutrients.get('Sodium', 0), 1)
        # Health insights — simple fixed-threshold heuristics.
        health_insights = []
        if protein > 25:
            health_insights.append("✅ High protein - great for muscle building")
        if fiber > 5:
            health_insights.append("✅ High fiber - supports digestive health")
        if sodium < 600:
            health_insights.append("✅ Low sodium - heart-friendly")
        if calories < 400:
            health_insights.append("✅ Low calorie - good for weight management")
        return {
            "recipe_title": recipe.get('title', 'Recipe'),
            "servings": recipe.get('servings', 1),
            "ready_in_minutes": recipe.get('readyInMinutes', 'N/A'),
            "health_score": recipe.get('healthScore', 0),
            "calories": calories,
            "protein": protein,
            "carbs": carbs,
            "fat": fat,
            "fiber": fiber,
            "sodium": sodium,
            "health_insights": health_insights
        }
    # Fix: was a bare `except:` (swallowed KeyboardInterrupt/SystemExit, hid the
    # cause). Catch only expected failures and report the reason.
    except (requests.RequestException, ValueError, KeyError, TypeError) as exc:
        return {"error": f"Nutrition analysis failed: {exc}"}
@tool
def estimate_costs(ingredients: List[str], servings: int = 4) -> Dict:
    """Detailed cost estimation with budget tips."""
    # Hard-coded reference prices (USD, priced for 4 servings); anything not
    # matched below falls back to a flat default.
    prices = {
        "chicken breast": 6.99, "ground beef": 5.99, "salmon": 12.99,
        "rice": 2.99, "pasta": 1.99, "broccoli": 2.99, "tomatoes": 3.99,
        "cheese": 5.99, "onion": 1.49, "garlic": 2.99, "olive oil": 7.99
    }
    # Fix: a zero/negative servings request previously raised ZeroDivisionError
    # in the cost_per_serving computation below.
    if servings <= 0:
        servings = 1
    cost_breakdown = []
    total_cost = 0.0
    for ingredient in ingredients:
        ingredient_lower = ingredient.lower().strip()
        cost = 3.99  # Default price for ingredients not in the reference table.
        for key, price in prices.items():
            # Fuzzy match: the full key, or any single word of the key
            # (e.g. "chicken" alone matches the "chicken breast" price).
            if key in ingredient_lower or any(word in ingredient_lower for word in key.split()):
                cost = price
                break
        # Reference prices assume 4 servings; scale linearly to the request.
        adjusted_cost = (cost * servings) / 4
        total_cost += adjusted_cost
        cost_breakdown.append({
            "name": ingredient.title(),
            "cost": round(adjusted_cost, 2)
        })
    # Budget tips — thresholds on the total estimated basket cost.
    budget_tips = []
    if total_cost > 30:
        budget_tips.append("💡 Consider buying in bulk for better prices")
    if total_cost > 40:
        budget_tips.append("💡 Look for seasonal alternatives to reduce costs")
    budget_tips.append("💡 Shop at local markets for fresher, cheaper produce")
    return {
        "total_cost": round(total_cost, 2),
        "cost_per_serving": round(total_cost / servings, 2),
        "servings": servings,
        "breakdown": cost_breakdown,
        "budget_tips": budget_tips
    }
@tool
def create_meal_plan(dietary_preference: str = "balanced", people: int = 2, days: int = 7, budget: str = "moderate") -> Dict:
    """Create comprehensive weekly meal plan with nutrition and shopping list."""
    # Static meal catalog; "cost" is the per-meal price for 4 people at the
    # "moderate" budget tier (scaled by `people` and the budget multiplier below).
    meals = {
        "breakfast": [
            {"name": "Overnight Oats with Berries", "calories": 320, "protein": 12, "cost": 2.50},
            {"name": "Veggie Scramble with Toast", "calories": 280, "protein": 18, "cost": 3.20},
            {"name": "Greek Yogurt Parfait", "calories": 250, "protein": 15, "cost": 2.80}
        ],
        "lunch": [
            {"name": "Quinoa Buddha Bowl", "calories": 420, "protein": 16, "cost": 4.50},
            {"name": "Chicken Caesar Wrap", "calories": 380, "protein": 25, "cost": 5.20},
            {"name": "Lentil Vegetable Soup", "calories": 340, "protein": 18, "cost": 3.80}
        ],
        "dinner": [
            {"name": "Grilled Salmon with Vegetables", "calories": 520, "protein": 35, "cost": 8.90},
            {"name": "Chicken Stir Fry with Brown Rice", "calories": 480, "protein": 32, "cost": 6.50},
            {"name": "Vegetable Curry with Quinoa", "calories": 450, "protein": 15, "cost": 5.20}
        ]
    }
    day_names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    # Fix: the original iterated day_names[:days] (at most 7 planned days) but
    # divided the totals by the *requested* `days`, so days > 7 produced
    # understated averages; days <= 0 or people <= 0 caused ZeroDivisionError.
    # Clamp both so the divisors always match what was actually planned.
    days = max(1, min(days, len(day_names)))
    people = max(1, people)
    budget_multipliers = {"low": 0.7, "moderate": 1.0, "high": 1.3}
    multiplier = budget_multipliers.get(budget.lower(), 1.0)  # Unknown tier -> moderate.
    weekly_plan = {}
    shopping_list = set()
    total_weekly_cost = 0
    total_weekly_calories = 0
    total_weekly_protein = 0
    for day in day_names[:days]:
        daily_meals = {}
        daily_calories = 0
        daily_protein = 0
        daily_cost = 0
        for meal_type in ["breakfast", "lunch", "dinner"]:
            # Random pick: the plan intentionally varies between calls.
            selected_meal = random.choice(meals[meal_type])
            daily_meals[meal_type] = {
                "name": selected_meal["name"],
                "calories": selected_meal["calories"],
                "protein": selected_meal["protein"]
            }
            meal_cost = selected_meal["cost"] * people * multiplier
            daily_calories += selected_meal["calories"]
            daily_protein += selected_meal["protein"]
            daily_cost += meal_cost
            # Derive a rough shopping list from keywords in the meal name.
            name_lower = selected_meal["name"].lower()
            if "chicken" in name_lower:
                shopping_list.add("Chicken breast")
            if "salmon" in name_lower:
                shopping_list.add("Salmon fillets")
            if "vegetable" in name_lower:
                shopping_list.update(["Mixed vegetables", "Onions", "Garlic"])
            if "quinoa" in name_lower:
                shopping_list.add("Quinoa")
            if "oats" in name_lower:
                shopping_list.add("Rolled oats")
        weekly_plan[day] = daily_meals
        total_weekly_cost += daily_cost
        total_weekly_calories += daily_calories
        total_weekly_protein += daily_protein
    # Averages are per planned day (days is clamped above, so this is exact).
    avg_daily_calories = round(total_weekly_calories / days)
    avg_daily_protein = round(total_weekly_protein / days, 1)
    insights = []
    if avg_daily_calories < 1800:
        insights.append("⚠️ Consider adding healthy snacks to meet calorie needs")
    elif avg_daily_calories > 2200:
        insights.append("💡 Calorie-dense meals - great for active lifestyles")
    if avg_daily_protein > 80:
        insights.append("✅ Excellent protein intake for muscle maintenance")
    elif avg_daily_protein < 60:
        insights.append("💡 Consider adding more protein sources")
    return {
        "meal_plan": weekly_plan,
        "total_weekly_cost": round(total_weekly_cost, 2),
        "cost_per_person_per_day": round(total_weekly_cost / (people * days), 2),
        "avg_daily_calories": avg_daily_calories,
        "avg_daily_protein": avg_daily_protein,
        "dietary_preference": dietary_preference,
        "serves": people,
        "days": days,  # Actual number of planned days (clamped to 1..7)
        "shopping_list": sorted(list(shopping_list)),
        "insights": insights
    }
async def create_agent():
    """Build and return the meal-planning Agent.

    Declared async to match the caller, which constructs it inside an
    explicitly created asyncio event loop.
    """
    agent = Agent(
        name="MealPlanningExpert",
        model=OpenAIChat(id="gpt-5-mini"),
        # Four local @tool functions plus web search for anything the APIs miss.
        tools=[search_recipes, analyze_nutrition, estimate_costs, create_meal_plan, DuckDuckGoTools()],
        instructions=dedent("""\
        You are an expert meal planning assistant. Provide detailed, helpful responses:
        🔍 **Recipe Searches**: Include cooking time, health scores, ingredient lists, and instructions
        📊 **Nutrition Analysis**: Provide health insights, nutritional breakdowns, and dietary advice
        💰 **Cost Estimation**: Include budget tips and cost per serving breakdowns
        📅 **Meal Planning**: Create detailed weekly plans with nutritional balance and shopping lists
        **Always**:
        - Use clear headings and bullet points
        - Include practical cooking tips
        - Consider dietary restrictions and budgets
        - Provide actionable next steps
        - Be encouraging and supportive
        """),
        markdown=True,
        debug_mode=True  # NOTE(review): presumably verbose agno logging — consider disabling for production
    )
    return agent
def main():
    """Streamlit entry point: configure the page, build the agent once, and run the chat loop."""
    st.set_page_config(page_title="AI Meal Planning Agent", page_icon="🍽️", layout="wide")
    st.title("🍽️ AI Meal Planning Agent")
    st.markdown("*Your intelligent companion for recipes, nutrition, and meal planning*")
    # Hard requirement: the agent cannot run without an OpenAI key.
    if not OPENAI_API_KEY:
        st.error("Please add OPENAI_API_KEY to your .env file")
        st.stop()
    # Initialize agent
    # Built once and cached in session_state so Streamlit reruns reuse it.
    if "agent" not in st.session_state:
        with st.spinner("Initializing agent..."):
            try:
                # NOTE(review): a fresh event loop is created here (and again per
                # chat turn below) but never closed — a small resource leak on
                # long sessions. Consider loop.close() in a finally block.
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                st.session_state.agent = loop.run_until_complete(create_agent())
            except Exception as e:
                st.error(f"Failed to initialize agent: {e}")
                st.stop()
    # Initialize messages
    # Seed the transcript with a welcome message on first load.
    if "messages" not in st.session_state:
        st.session_state.messages = [{
            "role": "assistant",
            "content": """👋 **Welcome! I'm your AI Meal Planning Expert.**
I can help you with:
- 🔍 **Recipe Discovery** - Find recipes based on your ingredients
- 📊 **Nutrition Analysis** - Get detailed nutritional insights
- 💰 **Cost Estimation** - Smart budget planning with money-saving tips
- 📅 **Meal Planning** - Complete weekly meal plans with shopping lists
**Try asking:**
- "Find healthy chicken recipes for dinner"
- "What's the nutrition info for chicken teriyaki?"
- "Create a vegetarian meal plan for 2 people for one week"
- "Estimate costs for pasta, tomatoes, cheese, and basil for 4 servings"
What would you like to explore? 🍽️"""
        }]
    # Chat interface
    # Replay the stored transcript on every rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # Chat input
    if user_input := st.chat_input("Ask about recipes, nutrition, meal planning, or costs..."):
        st.session_state.messages.append({"role": "user", "content": user_input})
        with st.chat_message("user"):
            st.markdown(user_input)
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                try:
                    # Run the async agent call on a dedicated loop per turn.
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)
                    response: RunOutput = loop.run_until_complete(
                        st.session_state.agent.arun(user_input)
                    )
                    st.markdown(response.content)
                    st.session_state.messages.append({
                        "role": "assistant",
                        "content": response.content
                    })
                except Exception as e:
                    # Record the failure in the transcript so the chat stays coherent.
                    error_msg = f"Error: {str(e)}"
                    st.error(error_msg)
                    st.session_state.messages.append({
                        "role": "assistant",
                        "content": error_msg
                    })
if __name__ == "__main__":
main() | {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/single_agent_apps/ai_recipe_meal_planning_agent/ai_recipe_meal_planning_agent.py",
"license": "Apache License 2.0",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_email_gtm_outreach_agent/ai_email_gtm_outreach_agent.py | import json
import os
import sys
from typing import Any, Dict, List, Optional
import streamlit as st
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.tools.exa import ExaTools
def require_env(var_name: str) -> None:
    """Exit the process with status 1 when the named environment variable is unset or empty."""
    value = os.getenv(var_name)
    if value:
        return
    print(f"Error: {var_name} not set. export {var_name}=...")
    sys.exit(1)
def create_company_finder_agent() -> Agent:
    """Build the agent that discovers target companies via Exa web search."""
    guidance = [
        "You are CompanyFinderAgent. Use ExaTools to search the web for companies that match the targeting criteria.",
        "Return ONLY valid JSON with key 'companies' as a list; respect the requested limit provided in the user prompt.",
        "Each item must have: name, website, why_fit (1-2 lines).",
    ]
    return Agent(
        model=OpenAIChat(id="gpt-5"),
        tools=[ExaTools(category="company")],
        db=SqliteDb(db_file="tmp/gtm_outreach.db"),
        session_id="gtm_outreach_company_finder",
        enable_user_memories=True,
        add_history_to_context=True,
        num_history_runs=6,
        debug_mode=True,
        instructions=guidance,
    )
def create_contact_finder_agent() -> Agent:
    """Build the agent that finds decision makers and emails for each company."""
    guidance = [
        "You are ContactFinderAgent. Use ExaTools to find 1-2 relevant decision makers per company and their emails if available.",
        "Prioritize roles from Founder's Office, GTM (Marketing/Growth), Sales leadership, Partnerships/Business Development, and Product Marketing.",
        "Search queries can include patterns like '<Company> email format', 'contact', 'team', 'leadership', and role titles.",
        "If direct emails are not found, infer likely email using common formats (e.g., first.last@domain), but mark inferred=true.",
        "Return ONLY valid JSON with key 'companies' as a list; each has: name, contacts: [{full_name, title, email, inferred}]",
    ]
    return Agent(
        model=OpenAIChat(id="gpt-4o"),
        tools=[ExaTools()],
        db=SqliteDb(db_file="tmp/gtm_outreach.db"),
        session_id="gtm_outreach_contact_finder",
        enable_user_memories=True,
        add_history_to_context=True,
        num_history_runs=6,
        debug_mode=True,
        instructions=guidance,
    )
def get_email_style_instruction(style_key: str) -> str:
    """Map a UI style key to the writing-style instruction for the email agent.

    Unknown keys fall back to the Professional style.
    """
    professional = "Style: Professional. Clear, respectful, and businesslike. Short paragraphs; no slang."
    style_map = {
        "Professional": professional,
        "Casual": "Style: Casual. Friendly, approachable, first-name basis. No slang or emojis; keep it human.",
        "Cold": "Style: Cold email. Strong hook in opening 2 lines, tight value proposition, minimal fluff, strong CTA.",
        "Consultative": "Style: Consultative. Insight-led, frames observed problems and tailored solution hypotheses; soft CTA.",
    }
    return style_map.get(style_key, professional)
def create_email_writer_agent(style_key: str = "Professional") -> Agent:
    """Build the agent that drafts outreach emails in the requested style."""
    guidance = [
        "You are EmailWriterAgent. Write concise, personalized B2B outreach emails.",
        get_email_style_instruction(style_key),
        "Return ONLY valid JSON with key 'emails' as a list of items: {company, contact, subject, body}.",
        "Length: 120-160 words. Include 1-2 lines of strong personalization referencing research insights (company website and Reddit findings).",
        "CTA: suggest a short intro call; include sender company name and calendar link if provided.",
    ]
    return Agent(
        model=OpenAIChat(id="gpt-5"),
        tools=[],
        db=SqliteDb(db_file="tmp/gtm_outreach.db"),
        session_id="gtm_outreach_email_writer",
        enable_user_memories=True,
        add_history_to_context=True,
        num_history_runs=6,
        debug_mode=False,
        instructions=guidance,
    )
def create_research_agent() -> Agent:
    """Build the agent that gathers personalization insights from company websites and Reddit."""
    guidance = [
        "You are ResearchAgent. For each company, collect concise, valuable insights from:",
        "1) Their official website (about, blog, product pages)",
        "2) Reddit discussions (site:reddit.com mentions)",
        "Summarize 2-4 interesting, non-generic points per company that a human would bring up in an email to show genuine effort.",
        "Return ONLY valid JSON with key 'companies' as a list; each has: name, insights: [strings].",
    ]
    return Agent(
        model=OpenAIChat(id="gpt-5"),
        tools=[ExaTools()],
        db=SqliteDb(db_file="tmp/gtm_outreach.db"),
        session_id="gtm_outreach_researcher",
        enable_user_memories=True,
        add_history_to_context=True,
        num_history_runs=6,
        debug_mode=True,
        instructions=guidance,
    )
def extract_json_or_raise(text: str) -> Dict[str, Any]:
    """Parse a model response as JSON, tolerating stray text around one JSON object.

    Tries a direct ``json.loads`` first; on failure, attempts to parse the
    substring between the first '{' and the last '}' (models often wrap JSON
    in prose or code fences).

    Raises:
        ValueError: with the original parse error and full response attached
            when no parse succeeds.
    """
    try:
        return json.loads(text)
    except Exception as e:
        # Fall back to the outermost {...} span if extra text snuck in.
        start = text.find("{")
        end = text.rfind("}")
        if start != -1 and end != -1 and end > start:
            candidate = text[start : end + 1]
            try:
                return json.loads(candidate)
            except Exception:
                # Fix: previously a failing candidate parse leaked a bare
                # JSONDecodeError; fall through to the informative error below.
                pass
        raise ValueError(f"Failed to parse JSON: {e}\nResponse was:\n{text}")
def run_company_finder(agent: Agent, target_desc: str, offering_desc: str, max_companies: int) -> List[Dict[str, str]]:
    """Ask the company-finder agent for prospects; return at most 10 companies."""
    prompt = (
        f"Find exactly {max_companies} companies that are a strong B2B fit given the user inputs.\n"
        f"Targeting: {target_desc}\n"
        f"Offering: {offering_desc}\n"
        "For each, provide: name, website, why_fit (1-2 lines)."
    )
    response = agent.run(prompt)
    payload = extract_json_or_raise(str(response.content))
    # Clamp the returned count to the 1..10 range regardless of model output.
    limit = max(1, min(max_companies, 10))
    return payload.get("companies", [])[:limit]
def run_contact_finder(agent: Agent, companies: List[Dict[str, str]], target_desc: str, offering_desc: str) -> List[Dict[str, Any]]:
    """Find 2-3 decision makers (with emails where available) per company."""
    parts = [
        "For each company below, find 2-3 relevant decision makers and emails (if available). Ensure at least 2 per company when possible, and cap at 3.\n",
        "If not available, infer likely email and mark inferred=true.\n",
        f"Targeting: {target_desc}\nOffering: {offering_desc}\n",
        f"Companies JSON: {json.dumps(companies, ensure_ascii=False)}\n",
        "Return JSON: {companies: [{name, contacts: [{full_name, title, email, inferred}]}]}",
    ]
    response = agent.run("".join(parts))
    parsed = extract_json_or_raise(str(response.content))
    return parsed.get("companies", [])
def run_research(agent: Agent, companies: List[Dict[str, str]]) -> List[Dict[str, Any]]:
    """Gather 2-4 personalization insights per company (website + Reddit)."""
    parts = [
        "For each company, gather 2-4 interesting insights from their website and Reddit that would help personalize outreach.\n",
        f"Companies JSON: {json.dumps(companies, ensure_ascii=False)}\n",
        "Return JSON: {companies: [{name, insights: [string, ...]}]}",
    ]
    response = agent.run("".join(parts))
    parsed = extract_json_or_raise(str(response.content))
    return parsed.get("companies", [])
def run_email_writer(agent: Agent, contacts_data: List[Dict[str, Any]], research_data: List[Dict[str, Any]], offering_desc: str, sender_name: str, sender_company: str, calendar_link: Optional[str]) -> List[Dict[str, str]]:
    """Draft one personalized outreach email per contact using the research insights."""
    parts = [
        "Write personalized outreach emails for the following contacts.\n",
        f"Sender: {sender_name} at {sender_company}.\n",
        f"Offering: {offering_desc}.\n",
        f"Calendar link: {calendar_link or 'N/A'}.\n",
        f"Contacts JSON: {json.dumps(contacts_data, ensure_ascii=False)}\n",
        f"Research JSON: {json.dumps(research_data, ensure_ascii=False)}\n",
        "Return JSON with key 'emails' as a list of {company, contact, subject, body}.",
    ]
    response = agent.run("".join(parts))
    parsed = extract_json_or_raise(str(response.content))
    return parsed.get("emails", [])
def run_pipeline(target_desc: str, offering_desc: str, sender_name: str, sender_company: str, calendar_link: Optional[str], num_companies: int):
    """Run discovery, contact finding, and research; email drafting is left to the UI flow.

    Returns a dict with keys companies/contacts/research/emails ('emails' is
    always an empty list here).
    """
    finder = create_company_finder_agent()
    contact_finder = create_contact_finder_agent()
    researcher = create_research_agent()
    companies = run_company_finder(finder, target_desc, offering_desc, max_companies=num_companies)
    contacts_data: List[Dict[str, Any]] = []
    research_data: List[Dict[str, Any]] = []
    if companies:
        contacts_data = run_contact_finder(contact_finder, companies, target_desc, offering_desc)
        research_data = run_research(researcher, companies)
    return {
        "companies": companies,
        "contacts": contacts_data,
        "research": research_data,
        "emails": [],
    }
def main() -> None:
    """Streamlit entry point: collect inputs, run the 4-stage outreach pipeline, render results."""
    st.set_page_config(page_title="GTM B2B Outreach", layout="wide")
    # Sidebar: API keys
    # Keys typed here are pushed into os.environ so the agno tools pick them up.
    st.sidebar.header("API Configuration")
    openai_key = st.sidebar.text_input("OpenAI API Key", type="password", value=os.getenv("OPENAI_API_KEY", ""))
    exa_key = st.sidebar.text_input("Exa API Key", type="password", value=os.getenv("EXA_API_KEY", ""))
    if openai_key:
        os.environ["OPENAI_API_KEY"] = openai_key
    if exa_key:
        os.environ["EXA_API_KEY"] = exa_key
    if not openai_key or not exa_key:
        st.sidebar.warning("Enter both API keys to enable the app")
    # Inputs
    st.title("GTM B2B Outreach Multi Agent Team")
    st.info(
        "GTM teams often need to reach out for demos and discovery calls, but manual research and personalization is slow. "
        "This app uses GPT-5 with a multi-agent workflow to find target companies, identify contacts, research genuine insights (website + Reddit), "
        "and generate tailored outreach emails in your chosen style."
    )
    col1, col2 = st.columns(2)
    with col1:
        target_desc = st.text_area("Target companies (industry, size, region, tech, etc.)", height=100)
        offering_desc = st.text_area("Your product/service offering (1-3 sentences)", height=100)
    with col2:
        sender_name = st.text_input("Your name", value="Sales Team")
        sender_company = st.text_input("Your company", value="Our Company")
        calendar_link = st.text_input("Calendar link (optional)", value="")
        num_companies = st.number_input("Number of companies", min_value=1, max_value=10, value=5)
        email_style = st.selectbox(
            "Email style",
            options=["Professional", "Casual", "Cold", "Consultative"],
            index=0,
            help="Choose the tone/format for the generated emails",
        )
    if st.button("Start Outreach", type="primary"):
        # Validate
        if not openai_key or not exa_key:
            st.error("Please provide API keys in the sidebar")
        elif not target_desc or not offering_desc:
            st.error("Please fill in target companies and offering")
        else:
            # Stage-by-stage progress UI
            progress = st.progress(0)
            stage_msg = st.empty()
            details = st.empty()
            try:
                # Prepare agents
                # NOTE: this inlines run_pipeline's stages so each step can drive
                # the progress widgets; keep the two flows in sync if edited.
                company_agent = create_company_finder_agent()
                contact_agent = create_contact_finder_agent()
                research_agent = create_research_agent()
                email_agent = create_email_writer_agent(email_style)
                # 1. Companies
                stage_msg.info("1/4 Finding companies...")
                companies = run_company_finder(
                    company_agent,
                    target_desc.strip(),
                    offering_desc.strip(),
                    max_companies=int(num_companies),
                )
                progress.progress(25)
                details.write(f"Found {len(companies)} companies")
                # 2. Contacts
                stage_msg.info("2/4 Finding contacts (2–3 per company)...")
                contacts_data = run_contact_finder(
                    contact_agent,
                    companies,
                    target_desc.strip(),
                    offering_desc.strip(),
                ) if companies else []
                progress.progress(50)
                details.write(f"Collected contacts for {len(contacts_data)} companies")
                # 3. Research
                stage_msg.info("3/4 Researching insights (website + Reddit)...")
                research_data = run_research(research_agent, companies) if companies else []
                progress.progress(75)
                details.write(f"Compiled research for {len(research_data)} companies")
                # 4. Emails
                stage_msg.info("4/4 Writing personalized emails...")
                emails = run_email_writer(
                    email_agent,
                    contacts_data,
                    research_data,
                    offering_desc.strip(),
                    sender_name.strip() or "Sales Team",
                    sender_company.strip() or "Our Company",
                    calendar_link.strip() or None,
                ) if contacts_data else []
                progress.progress(100)
                details.write(f"Generated {len(emails)} emails")
                # Persist results in session_state so they survive Streamlit reruns.
                st.session_state["gtm_results"] = {
                    "companies": companies,
                    "contacts": contacts_data,
                    "research": research_data,
                    "emails": emails,
                }
                stage_msg.success("Completed")
            except Exception as e:
                stage_msg.error("Pipeline failed")
                st.error(f"{e}")
    # Show results if present
    results = st.session_state.get("gtm_results")
    if results:
        companies = results.get("companies", [])
        contacts = results.get("contacts", [])
        research = results.get("research", [])
        emails = results.get("emails", [])
        st.subheader("Top target companies")
        if companies:
            for idx, c in enumerate(companies, 1):
                st.markdown(f"**{idx}. {c.get('name','')}** ")
                st.write(c.get("website", ""))
                st.write(c.get("why_fit", ""))
        else:
            st.info("No companies found")
        st.divider()
        st.subheader("Contacts found")
        if contacts:
            for c in contacts:
                st.markdown(f"**{c.get('name','')}**")
                # Show at most 3 contacts per company, flagging inferred emails.
                for p in c.get("contacts", [])[:3]:
                    inferred = " (inferred)" if p.get("inferred") else ""
                    st.write(f"- {p.get('full_name','')} | {p.get('title','')} | {p.get('email','')}{inferred}")
        else:
            st.info("No contacts found")
        st.divider()
        st.subheader("Research insights")
        if research:
            for r in research:
                st.markdown(f"**{r.get('name','')}**")
                for insight in r.get("insights", [])[:4]:
                    st.write(f"- {insight}")
        else:
            st.info("No research insights")
        st.divider()
        st.subheader("Suggested Outreach Emails")
        if emails:
            for i, e in enumerate(emails, 1):
                with st.expander(f"{i}. {e.get('company','')} → {e.get('contact','')}"):
                    st.write(f"Subject: {e.get('subject','')}")
                    st.text(e.get("body", ""))
        else:
            st.info("No emails generated")
if __name__ == "__main__":
main()
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/ai_email_gtm_outreach_agent/ai_email_gtm_outreach_agent.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/single_agent_apps/ai_email_gtm_reachout_agent/ai_email_gtm_reachout.py | import json
import os
import streamlit as st
from datetime import datetime
from textwrap import dedent
from typing import Dict, Iterator, List, Optional, Literal
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.db.sqlite import SqliteDb
from agno.tools.exa import ExaTools
from agno.utils.log import logger
from agno.utils.pprint import pprint_run_response
from agno.workflow import Workflow
from pydantic import BaseModel, Field
# Initialize API keys from environment or empty defaults
if 'EXA_API_KEY' not in st.session_state:
    st.session_state.EXA_API_KEY = os.getenv("EXA_API_KEY", "")
if 'OPENAI_API_KEY' not in st.session_state:
    st.session_state.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
# Set environment variables
# NOTE(review): when a key is absent this writes "" into os.environ, which can
# shadow a key provided by the OS environment later — confirm intended.
os.environ["EXA_API_KEY"] = st.session_state.EXA_API_KEY
os.environ["OPENAI_API_KEY"] = st.session_state.OPENAI_API_KEY
# Demo mode
# - set to True to print email to console
# - set to False to send to yourself
DEMO_MODE = True
# Today's date, used when composing/dating outreach content.
today = datetime.now().strftime("%Y-%m-%d")
# Example leads - Replace with your actual targets
# Keyed by company name; each entry carries the fields the workflow reads.
leads: Dict[str, Dict[str, str]] = {
    "Notion": {
        "name": "Notion",
        "website": "https://www.notion.so",
        "contact_name": "Ivan Zhao",
        "position": "CEO",
    },
    # Add more companies as needed
}
# Updated sender details for an AI analytics company
# These values are interpolated into generated emails — replace the placeholders.
sender_details_dict: Dict[str, str] = {
    "name": "Sarah Chen",
    "email": "your.email@company.com",  # Your email goes here
    "organization": "Data Consultants Inc",
    "service_offered": "We help build data products and offer data consulting services",
    "calendar_link": "https://calendly.com/data-consultants-inc",
    "linkedin": "https://linkedin.com/in/your-profile",
    "phone": "+1 (555) 123-4567",
    "website": "https://www.data-consultants.com",
}
# Email templates keyed by target department, then by outreach type.
# [BRACKETED] placeholders are filled in by the email-writer agent.
# NOTE(review): the "Human Resources" entry contains partnership- and
# investment-themed templates ("Consulting Services", "Investment Opportunity")
# that read as if they belong to other categories — confirm the mapping.
DEPARTMENT_TEMPLATES = {
    "GTM (Sales & Marketing)": {
        "Software Solution": """\
Hey [RECIPIENT_NAME],
I noticed [COMPANY_NAME]'s impressive [GTM_INITIATIVE] and your role in scaling [SPECIFIC_ACHIEVEMENT]. Your approach to [SALES_STRATEGY] caught my attention.
[PRODUCT_VALUE_FOR_GTM]
[GTM_SPECIFIC_BENEFIT]
Would love to show you how this could work for your team: [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Consulting Services": """\
Hey [RECIPIENT_NAME],
Your team's recent success with [CAMPAIGN_NAME] is impressive, particularly the [SPECIFIC_METRIC].
[CONSULTING_VALUE_PROP]
[GTM_IMPROVEMENT_POTENTIAL]
Here's my calendar if you'd like to explore this: [CALENDAR_LINK]
Best,
[SIGNATURE]\
"""
    },
    "Human Resources": {
        "Software Solution": """\
Hey [RECIPIENT_NAME],
I've been following [COMPANY_NAME]'s growth and noticed your focus on [HR_INITIATIVE]. Your approach to [SPECIFIC_HR_PROGRAM] stands out.
[HR_TOOL_VALUE_PROP]
[HR_SPECIFIC_BENEFIT]
Would you be open to seeing how this could help your HR initiatives? [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Consulting Services": """\
Hey [RECIPIENT_NAME],
I've been following [COMPANY_NAME]'s journey in [INDUSTRY], and your recent [ACHIEVEMENT] caught my attention. Your approach to [SPECIFIC_FOCUS] aligns perfectly with what we're building.
[PARTNERSHIP_VALUE_PROP]
[MUTUAL_BENEFIT]
Would love to explore potential synergies over a quick call: [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Investment Opportunity": """\
Hey [RECIPIENT_NAME],
Your work at [COMPANY_NAME] in [SPECIFIC_FOCUS] is impressive, especially [RECENT_ACHIEVEMENT].
[INVESTMENT_THESIS]
[UNIQUE_VALUE_ADD]
Here's my calendar if you'd like to discuss: [CALENDAR_LINK]
Best,
[SIGNATURE]\
"""
    },
    "Marketing Professional": {
        "Product Demo": """\
Hey [RECIPIENT_NAME],
I noticed [COMPANY_NAME]'s recent [MARKETING_INITIATIVE] and was impressed by [SPECIFIC_DETAIL].
[PRODUCT_VALUE_PROP]
[BENEFIT_TO_MARKETING]
Would you be open to a quick demo? Here's my calendar: [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Service Offering": """\
Hey [RECIPIENT_NAME],
Saw your team's work on [RECENT_CAMPAIGN] - great execution on [SPECIFIC_ELEMENT].
[SERVICE_VALUE_PROP]
[MARKETING_BENEFIT]
Here's my calendar if you'd like to explore this: [CALENDAR_LINK]
Best,
[SIGNATURE]\
"""
    },
    "B2B Sales Representative": {
        "Product Demo": """\
Hey [RECIPIENT_NAME],
Noticed your team at [COMPANY_NAME] is scaling [SALES_FOCUS]. Your approach to [SPECIFIC_STRATEGY] is spot-on.
[PRODUCT_VALUE_PROP]
[SALES_BENEFIT]
Would you be interested in seeing how this works? Here's my calendar: [CALENDAR_LINK]
Best,
[SIGNATURE]\
""",
        "Service Offering": """\
Hey [RECIPIENT_NAME],
Your sales team's success with [RECENT_WIN] caught my attention. Particularly impressed by [SPECIFIC_ACHIEVEMENT].
[SERVICE_VALUE_PROP]
[SALES_IMPROVEMENT]
Here's my calendar if you'd like to discuss: [CALENDAR_LINK]
Best,
[SIGNATURE]\
"""
    }
}
# Target verticals: short description plus the decision-maker roles typically
# targeted in each, used to guide company discovery and contact finding.
COMPANY_CATEGORIES = {
    "SaaS/Technology Companies": {
        "description": "Software, cloud services, and tech platforms",
        "typical_roles": ["CTO", "Head of Engineering", "VP of Product", "Engineering Manager", "Tech Lead"]
    },
    "E-commerce/Retail": {
        "description": "Online retail, marketplaces, and D2C brands",
        "typical_roles": ["Head of Digital", "E-commerce Manager", "Marketing Director", "Operations Head"]
    },
    "Financial Services": {
        "description": "Banks, fintech, insurance, and investment firms",
        "typical_roles": ["CFO", "Head of Innovation", "Risk Manager", "Product Manager"]
    },
    "Healthcare/Biotech": {
        "description": "Healthcare providers, biotech, and health tech",
        "typical_roles": ["Medical Director", "Head of R&D", "Clinical Manager", "Healthcare IT Lead"]
    },
    "Manufacturing/Industrial": {
        "description": "Manufacturing, industrial automation, and supply chain",
        "typical_roles": ["Operations Director", "Plant Manager", "Supply Chain Head", "Quality Manager"]
    }
}
class OutreachConfig(BaseModel):
    """Configuration for email outreach"""
    # Vertical to prospect in — presumably one of COMPANY_CATEGORIES keys (TODO confirm).
    company_category: str = Field(..., description="Type of companies to target")
    target_departments: List[str] = Field(
        ...,
        description="Departments to target (e.g., GTM, HR, Engineering)"
    )
    # Closed set of offering types; constrains template selection downstream.
    service_type: Literal[
        "Software Solution",
        "Consulting Services",
        "Professional Services",
        "Technology Platform",
        "Custom Development"
    ] = Field(..., description="Type of service being offered")
    company_size_preference: Literal["Startup (1-50)", "SMB (51-500)", "Enterprise (500+)", "All Sizes"] = Field(
        default="All Sizes",
        description="Preferred company size"
    )
    personalization_level: Literal["Basic", "Medium", "Deep"] = Field(
        default="Deep",
        description="Level of personalization"
    )
class ContactInfo(BaseModel):
    """Contact information for decision makers"""
    # Required identity fields; the rest are optional enrichment data.
    name: str = Field(..., description="Contact's full name")
    title: str = Field(..., description="Job title/position")
    email: Optional[str] = Field(None, description="Email address")
    linkedin: Optional[str] = Field(None, description="LinkedIn profile URL")
    company: str = Field(..., description="Company name")
    department: Optional[str] = Field(None, description="Department")
    background: Optional[str] = Field(None, description="Professional background")
class CompanyInfo(BaseModel):
    """
    Stores in-depth data about a company gathered during the research phase.

    Everything except name and website is optional: the research agents fill
    in whatever they can find, and absent fields remain None.
    """
    # Basic Information
    company_name: str = Field(..., description="Company name")
    website_url: str = Field(..., description="Company website URL")
    # Business Details
    industry: Optional[str] = Field(None, description="Primary industry")
    core_business: Optional[str] = Field(None, description="Main business focus")
    business_model: Optional[str] = Field(None, description="B2B, B2C, etc.")
    # Marketing Information
    motto: Optional[str] = Field(None, description="Company tagline/slogan")
    value_proposition: Optional[str] = Field(None, description="Main value proposition")
    target_audience: Optional[List[str]] = Field(
        None, description="Target customer segments"
    )
    # Company Metrics
    company_size: Optional[str] = Field(None, description="Employee count range")
    founded_year: Optional[int] = Field(None, description="Year founded")
    locations: Optional[List[str]] = Field(None, description="Office locations")
    # Technical Details
    technologies: Optional[List[str]] = Field(None, description="Technology stack")
    integrations: Optional[List[str]] = Field(None, description="Software integrations")
    # Market Position
    competitors: Optional[List[str]] = Field(None, description="Main competitors")
    unique_selling_points: Optional[List[str]] = Field(
        None, description="Key differentiators"
    )
    market_position: Optional[str] = Field(None, description="Market positioning")
    # Social Proof
    customers: Optional[List[str]] = Field(None, description="Notable customers")
    case_studies: Optional[List[str]] = Field(None, description="Success stories")
    awards: Optional[List[str]] = Field(None, description="Awards and recognition")
    # Recent Activity
    recent_news: Optional[List[str]] = Field(None, description="Recent news/updates")
    blog_topics: Optional[List[str]] = Field(None, description="Recent blog topics")
    # Pain Points & Opportunities
    challenges: Optional[List[str]] = Field(None, description="Potential pain points")
    growth_areas: Optional[List[str]] = Field(None, description="Growth opportunities")
    # Contact Information
    email_address: Optional[str] = Field(None, description="Contact email")
    phone: Optional[str] = Field(None, description="Contact phone")
    social_media: Optional[Dict[str, str]] = Field(
        None, description="Social media links"
    )
    # Additional Fields
    pricing_model: Optional[str] = Field(None, description="Pricing strategy and tiers")
    user_base: Optional[str] = Field(None, description="Estimated user base size")
    key_features: Optional[List[str]] = Field(None, description="Main product features")
    integration_ecosystem: Optional[List[str]] = Field(
        None, description="Integration partners"
    )
    funding_status: Optional[str] = Field(
        None, description="Latest funding information"
    )
    growth_metrics: Optional[Dict[str, str]] = Field(
        None, description="Key growth indicators"
    )
class PersonalisedEmailGenerator(Workflow):
"""
Automated B2B outreach system that:
1. Discovers companies using Exa search based on criteria
2. Finds contact details for decision makers at those companies
3. Researches company details and pain points
4. Generates personalized cold emails for B2B outreach
This workflow is designed to automate the entire prospecting process
from company discovery to personalized email generation.
"""
description: str = dedent("""\
AI-Powered B2B Outreach Workflow:
--------------------------------------------------------
1. Discover Target Companies (Exa Search)
2. Find Decision Maker Contacts
3. Research Company Intelligence
4. Generate Personalized Emails
--------------------------------------------------------
Fully automated prospecting pipeline for B2B outreach.
""")
company_finder: Agent = Agent(
model=OpenAIChat(id="gpt-5"),
tools=[ExaTools(api_key=os.environ["EXA_API_KEY"])],
description="Expert at finding companies that match specific criteria using web search",
instructions=dedent("""\
You are a company discovery specialist. Your job is to find companies that match the given criteria.
Search for companies based on:
- Industry/sector
- Company size
- Geographic location
- Business model
- Technology stack
- Recent funding/growth
For each company found, provide:
- Company name
- Website URL
- Brief description
- Industry
- Estimated size
- Location
Focus on finding companies that would be good prospects for the specified service offering.
Look for companies showing signs of growth, funding, or expansion.
"""),
)
contact_finder: Agent = Agent(
model=OpenAIChat(id="gpt-5"),
tools=[ExaTools(api_key=os.environ["EXA_API_KEY"])],
description="Expert at finding contact information for decision makers at companies",
instructions=dedent("""\
You are a contact research specialist. Find decision makers and their contact information.
For each company, search for:
- Key decision makers in target departments
- Their email addresses
- LinkedIn profiles
- Professional backgrounds
- Current role and responsibilities
Focus on finding people in roles like:
- CEO, CTO, VP of Engineering (for tech solutions)
- CMO, VP Marketing, Growth Lead (for marketing solutions)
- VP Sales, Sales Director (for sales solutions)
- HR Director, People Ops (for HR solutions)
Provide verified contact information when possible.
"""),
)
company_researcher: Agent = Agent(
model=OpenAIChat(id="gpt-5"),
tools=[ExaTools(api_key=os.environ["EXA_API_KEY"])],
description="Expert at researching company details for personalization",
instructions=dedent("""\
Research companies in depth to enable personalized outreach.
Analyze:
- Company website and messaging
- Recent news and updates
- Product/service offerings
- Technology stack
- Growth indicators
- Pain points and challenges
- Recent achievements
- Market position
Focus on insights that would be relevant for B2B outreach:
- Scaling challenges
- Technology needs
- Market expansion
- Competitive positioning
- Recent wins or milestones
"""),
)
email_creator: Agent = Agent(
model=OpenAIChat(id="gpt-5"),
description=dedent("""\
You are writing for a friendly, empathetic 20-year-old sales rep whose
style is cool, concise, and respectful. Tone is casual yet professional.
- Be polite but natural, using simple language.
- Never sound robotic or use big cliché words like "delve", "synergy" or "revolutionary."
- Clearly address problems the prospect might be facing and how we solve them.
- Keep paragraphs short and friendly, with a natural voice.
- End on a warm, upbeat note, showing willingness to help.\
"""),
instructions=dedent("""\
Please craft a highly personalized email that has:
1. A simple, personal subject line referencing the problem or opportunity.
2. At least one area for improvement or highlight from research.
3. A quick explanation of how we can help them (no heavy jargon).
4. References a known challenge from the research.
5. Avoid words like "delve", "explore", "synergy", "amplify", "game changer", "revolutionary", "breakthrough".
6. Use first-person language ("I") naturally.
7. Maintain a 20-year-old's friendly style—brief and to the point.
8. Avoid placing the recipient's name in the subject line.
Use the appropriate template based on the target professional type and outreach purpose.
Ensure the final tone feels personal and conversation-like, not automatically generated.
----------------------------------------------------------------------
"""),
)
def get_cached_data(self, cache_key: str) -> Optional[dict]:
"""Retrieve cached data"""
logger.info(f"Checking cache for: {cache_key}")
return self.session_state.get("cache", {}).get(cache_key)
def cache_data(self, cache_key: str, data: dict):
"""Cache data"""
logger.info(f"Caching data for: {cache_key}")
self.session_state.setdefault("cache", {})
self.session_state["cache"][cache_key] = data
self.write_to_storage()
def run(
self,
config: OutreachConfig,
sender_details: Dict[str, str],
num_companies: int = 5,
use_cache: bool = True,
):
"""
Automated B2B outreach workflow:
1. Discover companies using Exa search based on criteria
2. Find decision maker contacts for each company
3. Research company details for personalization
4. Generate personalized emails
"""
logger.info("Starting automated B2B outreach workflow...")
# Step 1: Discover companies
logger.info("🔍 Discovering target companies...")
search_query = f"""
Find {num_companies} {config.company_category} companies that would be good prospects for {config.service_type}.
Company criteria:
- Industry: {config.company_category}
- Size: {config.company_size_preference}
- Target departments: {', '.join(config.target_departments)}
Look for companies showing growth, recent funding, or expansion.
"""
companies_response = self.company_finder.run(search_query)
if not companies_response or not companies_response.content:
logger.error("No companies found")
return
# Parse companies from response
companies_text = companies_response.content
logger.info(f"Found companies: {companies_text[:200]}...")
# Step 2: For each company, find contacts and research
for i in range(num_companies):
try:
logger.info(f"Processing company #{i+1}")
# Yield progress update
yield {
"step": f"Processing company {i+1}/{num_companies}",
"progress": (i + 0.2) / num_companies,
"status": "Finding contacts..."
}
# Extract company info from the response
company_search = f"Extract company #{i+1} details from: {companies_text}"
# Step 3: Find decision maker contacts
logger.info("👥 Finding decision maker contacts...")
contacts_query = f"""
Find decision makers at company #{i+1} from this list: {companies_text}
Focus on roles in: {', '.join(config.target_departments)}
Find their email addresses and LinkedIn profiles.
"""
contacts_response = self.contact_finder.run(contacts_query)
if not contacts_response or not contacts_response.content:
logger.warning(f"No contacts found for company #{i+1}")
continue
# Yield progress update
yield {
"step": f"Processing company {i+1}/{num_companies}",
"progress": (i + 0.4) / num_companies,
"status": "Researching company..."
}
# Step 4: Research company details
logger.info("🔬 Researching company details...")
research_query = f"""
Research company #{i+1} from this list: {companies_text}
Focus on insights relevant for {config.service_type} outreach.
Find pain points related to {', '.join(config.target_departments)}.
"""
research_response = self.company_researcher.run(research_query)
if not research_response or not research_response.content:
logger.warning(f"No research data for company #{i+1}")
continue
# Parse the research response content
research_content = research_response.content
if not research_content:
logger.warning(f"No research data for company #{i+1}")
continue
# Create a basic company info structure from the research
company_data = CompanyInfo(
company_name=f"Company #{i+1}", # Will be updated with actual name
website_url="", # Will be updated with actual URL
industry="Unknown",
core_business=research_content[:200] if research_content else "No data available"
)
# Yield progress update
yield {
"step": f"Processing company {i+1}/{num_companies}",
"progress": (i + 0.6) / num_companies,
"status": "Generating email..."
}
# Step 5: Generate personalized email
logger.info("✉️ Generating personalized email...")
# Get appropriate template based on target departments
template_dept = config.target_departments[0] if config.target_departments else "GTM (Sales & Marketing)"
if template_dept in DEPARTMENT_TEMPLATES and config.service_type in DEPARTMENT_TEMPLATES[template_dept]:
template = DEPARTMENT_TEMPLATES[template_dept][config.service_type]
else:
template = DEPARTMENT_TEMPLATES["GTM (Sales & Marketing)"]["Software Solution"]
email_context = json.dumps(
{
"template": template,
"company_info": company_data.model_dump(),
"contacts_info": contacts_response.content,
"sender_details": sender_details,
"target_departments": config.target_departments,
"service_type": config.service_type,
"personalization_level": config.personalization_level
},
indent=4,
)
email_response = self.email_creator.run(
f"Generate a personalized email using this context:\n{email_context}"
)
if not email_response or not email_response.content:
logger.warning(f"No email generated for company #{i+1}")
continue
yield {
"company_name": company_data.company_name,
"email": email_response.content,
"company_data": company_data.model_dump(),
"contacts": contacts_response.content,
"step": f"Company {i+1}/{num_companies} completed",
"progress": (i + 1) / num_companies,
"status": "Completed"
}
except Exception as e:
logger.error(f"Error processing company #{i+1}: {e}")
continue
def create_streamlit_ui():
    """Create the Streamlit user interface.

    Renders the targeting form (company category/size, sender details,
    service type, departments), validates required inputs, and returns
    ``(outreach_config, sender_details, num_companies)``.

    Note: on missing required input this calls ``st.stop()``, which halts
    script execution for the current rerun — the caller only receives a
    return value once the form is completely filled in.
    """
    st.title("🚀 Automated B2B Email Outreach Generator")
    st.markdown("""
    **Fully automated prospecting pipeline**: Discovers companies, finds decision makers,
    and generates personalized emails using AI research agents.
    """)

    # Step 1: Target Company Category Selection
    st.header("1️⃣ Target Company Discovery")
    col1, col2 = st.columns([2, 1])
    with col1:
        # COMPANY_CATEGORIES is a module-level mapping of
        # category -> {'description', 'typical_roles'} (defined above).
        selected_category = st.selectbox(
            "What type of companies should we target?",
            options=list(COMPANY_CATEGORIES.keys()),
            key="company_category"
        )
        st.info(f"📌 {COMPANY_CATEGORIES[selected_category]['description']}")
        st.markdown("### Typical Decision Makers We'll Find:")
        for role in COMPANY_CATEGORIES[selected_category]['typical_roles']:
            st.markdown(f"- {role}")
    with col2:
        st.markdown("### Company Size Filter")
        company_size = st.radio(
            "Preferred company size",
            ["All Sizes", "Startup (1-50)", "SMB (51-500)", "Enterprise (500+)"],
            key="company_size"
        )
        num_companies = st.number_input(
            "Number of companies to find",
            min_value=1,
            max_value=20,
            value=5,
            help="AI will discover this many companies automatically"
        )

    # Step 2: Your Information
    st.header("2️⃣ Your Contact Information")
    col3, col4 = st.columns(2)
    with col3:
        st.subheader("Required Information")
        sender_details = {
            "name": st.text_input("Your Name *", key="sender_name"),
            "email": st.text_input("Your Email *", key="sender_email"),
            "organization": st.text_input("Your Organization *", key="sender_org")
        }
    with col4:
        st.subheader("Optional Information")
        sender_details.update({
            "linkedin": st.text_input("LinkedIn Profile (optional)", key="sender_linkedin", placeholder="https://linkedin.com/in/yourname"),
            "phone": st.text_input("Phone Number (optional)", key="sender_phone", placeholder="+1 (555) 123-4567"),
            "website": st.text_input("Company Website (optional)", key="sender_website", placeholder="https://yourcompany.com"),
            "calendar_link": st.text_input("Calendar Link (optional)", key="sender_calendar", placeholder="https://calendly.com/yourname")
        })

    # Service description (required — validated below)
    sender_details["service_offered"] = st.text_area(
        "Describe your offering *",
        height=100,
        key="service_description",
        help="Explain what you offer and how it helps businesses",
        placeholder="We help companies build custom AI solutions that automate workflows and improve efficiency..."
    )

    # Step 3: Service Type and Targeting
    st.header("3️⃣ Outreach Configuration")
    col5, col6 = st.columns(2)
    with col5:
        service_type = st.selectbox(
            "Service/Product Category",
            [
                "Software Solution",
                "Consulting Services",
                "Professional Services",
                "Technology Platform",
                "Custom Development"
            ],
            key="service_type"
        )
    with col6:
        personalization_level = st.select_slider(
            "Email Personalization Level",
            options=["Basic", "Medium", "Deep"],
            value="Deep",
            help="Deep personalization takes longer but produces better results"
        )

    # Step 4: Target Department Selection
    target_departments = st.multiselect(
        "Which departments should we target?",
        [
            "GTM (Sales & Marketing)",
            "Human Resources",
            "Engineering/Tech",
            "Operations",
            "Finance",
            "Product",
            "Executive Leadership"
        ],
        default=["GTM (Sales & Marketing)"],
        key="target_departments",
        help="AI will find decision makers in these departments"
    )

    # Validate required inputs — each failure reports and stops the rerun.
    required_fields = ["name", "email", "organization", "service_offered"]
    missing_fields = [field for field in required_fields if not sender_details.get(field)]
    if missing_fields:
        st.error(f"Please fill in required fields: {', '.join(missing_fields)}")
        st.stop()
    if not target_departments:
        st.error("Please select at least one target department")
        st.stop()
    if not selected_category:
        st.error("Please select a company category")
        st.stop()
    if not service_type:
        st.error("Please select a service type")
        st.stop()

    # Create and return configuration
    outreach_config = OutreachConfig(
        company_category=selected_category,
        target_departments=target_departments,
        service_type=service_type,
        company_size_preference=company_size,
        personalization_level=personalization_level
    )
    return outreach_config, sender_details, num_companies
def main():
    """
    Main entry point for running the automated B2B outreach workflow.

    Renders the Streamlit app: API-key sidebar, targeting form
    (create_streamlit_ui), then streams workflow results into a tabbed
    per-company report with progress tracking.
    """
    try:
        # Set page config must be the first Streamlit command
        st.set_page_config(
            page_title="Automated B2B Email Outreach",
            layout="wide",
            initial_sidebar_state="expanded"
        )

        # API Keys in Sidebar
        st.sidebar.header("🔑 API Configuration")

        # Update API keys from sidebar.
        # NOTE(review): assumes st.session_state.EXA_API_KEY / OPENAI_API_KEY
        # were initialized earlier in the module — confirm before reordering.
        st.session_state.EXA_API_KEY = st.sidebar.text_input(
            "Exa API Key *",
            value=st.session_state.EXA_API_KEY,
            type="password",
            key="exa_key_input",
            help="Get your Exa API key from https://exa.ai"
        )
        st.session_state.OPENAI_API_KEY = st.sidebar.text_input(
            "OpenAI API Key *",
            value=st.session_state.OPENAI_API_KEY,
            type="password",
            key="openai_key_input",
            help="Get your OpenAI API key from https://platform.openai.com"
        )

        # Export to environment so the agent toolchains can read them.
        os.environ["EXA_API_KEY"] = st.session_state.EXA_API_KEY
        os.environ["OPENAI_API_KEY"] = st.session_state.OPENAI_API_KEY

        # Validate API keys
        if not st.session_state.EXA_API_KEY or not st.session_state.OPENAI_API_KEY:
            st.sidebar.error("⚠️ Both API keys are required to run the application")
        else:
            st.sidebar.success("✅ API keys configured")

        # Add guidance about API keys
        st.sidebar.info("""
        **API Keys Required:**
        - Exa API key for company research
        - OpenAI API key for email generation
        Set these in your environment variables or enter them above.
        """)

        # Get user inputs from the UI (st.stop() inside halts the rerun
        # until the form is complete).
        try:
            config, sender_details, num_companies = create_streamlit_ui()
        except Exception as e:
            st.error(f"Configuration error: {str(e)}")
            st.stop()

        # Generate Emails Section
        st.header("4️⃣ Generate Outreach Campaign")
        st.info(f"""
        **Ready to launch automated prospecting:**
        - Target: {config.company_category} companies ({config.company_size_preference})
        - Departments: {', '.join(config.target_departments)}
        - Service: {config.service_type}
        - Companies to find: {num_companies}
        """)

        if st.button("🚀 Start Automated Campaign", key="generate_button", type="primary"):
            # Check if API keys are configured
            if not st.session_state.EXA_API_KEY or not st.session_state.OPENAI_API_KEY:
                st.error("❌ Please configure both API keys before starting the campaign")
                st.stop()

            try:
                # Progress tracking widgets
                progress_bar = st.progress(0)
                status_text = st.empty()
                results_container = st.container()

                with st.spinner("Initializing AI research agents..."):
                    # Setup the database
                    db = SqliteDb(
                        db_file="tmp/agno_workflows.db",
                    )
                    workflow = PersonalisedEmailGenerator(
                        session_id="streamlit-email-generator",
                        db=db
                    )

                status_text.text("🔍 Discovering companies and generating emails...")

                # Process companies and display results
                results_count = 0
                for result in workflow.run(
                    config=config,
                    sender_details=sender_details,
                    num_companies=num_companies,
                    use_cache=True
                ):
                    # BUG FIX: completed-email results ALSO carry a 'progress'
                    # key, so the previous `if 'progress' in result` test
                    # matched every yield and the counting branch was
                    # unreachable — results_count stayed 0 and the app
                    # reported "No emails were generated" even on success.
                    # Discriminate completed results by their 'email' key.
                    if 'email' in result:
                        results_count += 1
                        progress_bar.progress(result.get('progress', results_count / num_companies))
                        status_text.text(f"✅ {result['step']}")
                    elif 'progress' in result:
                        progress_bar.progress(result['progress'])
                        status_text.text(f"🔄 {result['status']} - {result['step']}")

                    # Only display results for completed emails
                    if 'email' in result:
                        with results_container:
                            # Create a more visually appealing card layout
                            with st.container():
                                st.markdown("---")

                                # Header with company info
                                col_header1, col_header2 = st.columns([3, 1])
                                with col_header1:
                                    st.markdown(f"### 📧 {result['company_name']}")
                                with col_header2:
                                    st.success(f"✅ Email #{results_count}")

                                # Create tabs for different information
                                tab1, tab2, tab3, tab4 = st.tabs(["📝 Generated Email", "🏢 Company Research", "👥 Contacts Found", "📊 Summary"])

                                with tab1:
                                    # Email display with better formatting
                                    st.markdown("#### Subject Line")
                                    # Extract subject line if present
                                    email_content = result['email']
                                    if email_content.startswith('Subject:'):
                                        lines = email_content.split('\n', 1)
                                        subject = lines[0].replace('Subject:', '').strip()
                                        body = lines[1] if len(lines) > 1 else ""
                                        st.info(f"**{subject}**")
                                        st.markdown("#### Email Body")
                                        st.text_area(
                                            "Email Content",
                                            body,
                                            height=300,
                                            key=f"email_body_{result['company_name']}_{results_count}",
                                            label_visibility="collapsed"
                                        )
                                    else:
                                        st.text_area(
                                            "Email Content",
                                            email_content,
                                            height=300,
                                            key=f"email_body_{result['company_name']}_{results_count}",
                                            label_visibility="collapsed"
                                        )

                                    # "Copy" button gives visual feedback only;
                                    # server-side Streamlit has no clipboard access.
                                    if st.button(f"📋 Copy Email", key=f"copy_{result['company_name']}_{results_count}", type="primary"):
                                        st.success("📋 Email copied to clipboard!")

                                with tab2:
                                    # Company research with better formatting
                                    st.markdown("#### Company Intelligence")
                                    company_data = result['company_data']

                                    # Key metrics in columns
                                    col_metrics1, col_metrics2 = st.columns(2)
                                    with col_metrics1:
                                        if company_data.get('industry'):
                                            st.metric("Industry", company_data['industry'])
                                        if company_data.get('company_size'):
                                            st.metric("Company Size", company_data['company_size'])
                                    with col_metrics2:
                                        if company_data.get('founded_year'):
                                            st.metric("Founded", company_data['founded_year'])
                                        if company_data.get('funding_status'):
                                            st.metric("Funding", company_data['funding_status'])

                                    # Core business info
                                    if company_data.get('core_business'):
                                        st.markdown("#### Business Focus")
                                        st.write(company_data['core_business'])

                                    # Additional details
                                    if company_data.get('technologies'):
                                        st.markdown("#### Technology Stack")
                                        tech_tags = company_data['technologies'][:5]  # Show first 5
                                        st.write(", ".join(tech_tags))

                                    # Raw data expander
                                    with st.expander("🔍 View Raw Research Data"):
                                        st.json(company_data)

                                with tab3:
                                    # Contacts with better formatting
                                    st.markdown("#### Decision Makers Found")
                                    contacts_text = result['contacts']

                                    # Try to parse contacts if they're structured
                                    if contacts_text:
                                        st.text_area(
                                            "Contact Information",
                                            contacts_text,
                                            height=200,
                                            key=f"contacts_{result['company_name']}_{results_count}",
                                            label_visibility="collapsed"
                                        )

                                        # Copy contacts button
                                        if st.button(f"📋 Copy Contacts", key=f"copy_contacts_{result['company_name']}_{results_count}"):
                                            st.success("📋 Contacts copied!")
                                    else:
                                        st.warning("No contact information found for this company.")

                                with tab4:
                                    # Summary tab with key insights
                                    st.markdown("#### Campaign Summary")

                                    # Key stats
                                    col_summary1, col_summary2, col_summary3 = st.columns(3)
                                    with col_summary1:
                                        st.metric("Personalization Level", config.personalization_level)
                                    with col_summary2:
                                        st.metric("Service Type", config.service_type)
                                    with col_summary3:
                                        st.metric("Target Dept", config.target_departments[0] if config.target_departments else "N/A")

                                    # Email quality indicators
                                    email_length = len(result['email'])
                                    st.markdown("#### Email Quality")

                                    col_quality1, col_quality2 = st.columns(2)
                                    with col_quality1:
                                        st.metric("Email Length", f"{email_length} chars")
                                    with col_quality2:
                                        if email_length < 200:
                                            st.metric("Length Rating", "🟢 Concise")
                                        elif email_length < 400:
                                            st.metric("Length Rating", "🟡 Good")
                                        else:
                                            st.metric("Length Rating", "🔴 Long")

                                    # Personalization score
                                    personalization_score = 85  # Placeholder - could be calculated
                                    st.markdown("#### Personalization Score")
                                    st.progress(personalization_score / 100)
                                    st.caption(f"Score: {personalization_score}/100 - {'Excellent' if personalization_score > 80 else 'Good' if personalization_score > 60 else 'Needs Improvement'}")

                                # Footer with timestamp
                                st.caption(f"Generated on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

                # Final status with enhanced display
                if results_count > 0:
                    progress_bar.progress(1.0)
                    status_text.text(f"🎉 Campaign complete! Generated {results_count} personalized emails")

                    # Success summary
                    st.success(f"🎉 **Campaign Complete!** Successfully generated {results_count} personalized emails")

                    # Campaign summary metrics
                    st.markdown("### 📊 Campaign Summary")
                    col_summary1, col_summary2, col_summary3, col_summary4 = st.columns(4)
                    with col_summary1:
                        st.metric("Emails Generated", results_count)
                    with col_summary2:
                        st.metric("Target Companies", num_companies)
                    with col_summary3:
                        st.metric("Success Rate", f"{(results_count/num_companies)*100:.1f}%")
                    with col_summary4:
                        st.metric("Service Type", config.service_type)

                    # Action buttons for campaign
                    st.markdown("### 🚀 Next Steps")
                    col_action1, col_action2, col_action3 = st.columns(3)
                    with col_action1:
                        if st.button("📧 Export All Emails", key="export_all", type="primary"):
                            st.success("💾 All emails exported successfully!")
                    with col_action2:
                        if st.button("📊 Generate Report", key="generate_report"):
                            st.info("📈 Campaign report generated!")
                    with col_action3:
                        if st.button("🔄 Run New Campaign", key="new_campaign"):
                            st.rerun()

                    # Celebration
                    st.balloons()
                else:
                    st.error("❌ **No emails were generated.** Please try adjusting your criteria or check your API keys.")

                    # Troubleshooting tips
                    with st.expander("🔧 Troubleshooting Tips"):
                        st.markdown("""
                        **Common issues and solutions:**
                        1. **API Keys**: Make sure both Exa and OpenAI API keys are valid
                        2. **Company Criteria**: Try broader categories or different company sizes
                        3. **Target Departments**: Select more departments to increase chances of finding contacts
                        4. **Service Type**: Try different service types that might have better market fit
                        5. **Number of Companies**: Start with fewer companies (1-3) for testing
                        """)

            except Exception as e:
                st.error(f"Campaign failed: {str(e)}")
                logger.error(f"Workflow failed: {e}")
                st.exception(e)

        st.sidebar.markdown("### About")
        st.sidebar.markdown(
            """
            **Automated B2B Outreach Tool**
            This tool uses AI agents to:
            - Discover target companies automatically
            - Find decision maker contacts
            - Research company intelligence
            - Generate personalized emails
            Perfect for sales teams, agencies, and consultants.
            """
        )

    except Exception as e:
        logger.error(f"Workflow failed: {e}")
        st.error(f"An error occurred: {str(e)}")
        raise
if __name__ == "__main__":
main() | {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/single_agent_apps/ai_email_gtm_reachout_agent/ai_email_gtm_reachout.py",
"license": "Apache License 2.0",
"lines": 904,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_2_loop_agent/agent.py | import os
import asyncio
import inspect
from typing import AsyncGenerator, Dict, Any
from dotenv import load_dotenv
from google.adk.agents import LlmAgent, LoopAgent
from google.adk.agents.base_agent import BaseAgent
from google.adk.agents.invocation_context import InvocationContext
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
from google.adk.events import Event, EventActions
from google.genai import types
# Load environment variables
load_dotenv()
# ------------------------------------------------------------
# Sub-agent 1: LLM refiner that improves the plan each iteration
# ------------------------------------------------------------
# LLM sub-agent that produces/refines the plan text each loop iteration.
plan_refiner = LlmAgent(
    name="plan_refiner",
    model="gemini-3-flash-preview",
    description="Iteratively refines a brief product/launch plan given topic and prior context",
    instruction=(
        "You are an iterative planner. On each turn:\n"
        "- Improve and tighten the current plan for the topic in session state\n"
        "- Keep it concise (5-8 bullets) and avoid repeating prior text verbatim\n"
        "- Incorporate clarity, feasibility, and crisp sequencing\n"
        "- Assume this output will be refined again in subsequent iterations\n\n"
        "Output format:\n"
        "Title line\n"
        "- Bullet 1\n- Bullet 2\n- Bullet 3 ..."
    ),
)
# ------------------------------------------------------------
# Sub-agent 2: Progress tracker increments iteration counter
# ------------------------------------------------------------
class IncrementIteration(BaseAgent):
    """Custom agent that advances the loop's iteration counter in session state."""

    async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
        state = ctx.session.state
        next_iteration = int(state.get("iteration", 0)) + 1
        state["iteration"] = next_iteration
        # Emit a single informational event describing the new counter value.
        message = types.Content(
            role="model",
            parts=[types.Part(text=f"Iteration advanced to {next_iteration}")],
        )
        yield Event(author=self.name, content=message)
# ------------------------------------------------------------
# Sub-agent 3: Completion check with optional early stop
# - Stops if iteration >= target_iterations OR session flag 'accepted' is True
# ------------------------------------------------------------
class CheckCompletion(BaseAgent):
    """Custom agent that signals loop termination.

    Escalates (stopping the enclosing LoopAgent) once the session's
    'accepted' flag is set or the iteration counter reaches
    'target_iterations'.
    """

    async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
        state = ctx.session.state
        limit = int(state.get("target_iterations", 3))
        done = int(state.get("iteration", 0))
        approved = bool(state.get("accepted", False))
        stop_now = approved or done >= limit
        status_text = "Stopping criteria met" if stop_now else "Continuing loop"
        yield Event(
            author=self.name,
            # escalate=True tells the LoopAgent to terminate the loop.
            actions=EventActions(escalate=stop_now),
            content=types.Content(role="model", parts=[types.Part(text=status_text)]),
        )
# Shared instances wired into the LoopAgent below as sub-agents.
increment_iteration = IncrementIteration(name="increment_iteration")
check_completion = CheckCompletion(name="check_completion")
# ------------------------------------------------------------
# LoopAgent: Executes sub-agents sequentially in a loop
# - Termination: max_iterations, or CheckCompletion escalates
# - Context & State: Same InvocationContext across iterations
# ------------------------------------------------------------
spec_refinement_loop = LoopAgent(
    name="spec_refinement_loop",
    description=(
        "Iteratively refines a plan using LLM, tracks iterations, and stops when target iterations "
        "are reached or an 'accepted' flag is set in session state."
    ),
    max_iterations=10,  # hard safety cap; check_completion normally stops earlier
    sub_agents=[
        plan_refiner,         # 1) refine the plan text
        increment_iteration,  # 2) advance the iteration counter
        check_completion,     # 3) escalate to stop when done
    ],
)
# ------------------------------------------------------------
# Runner setup
# ------------------------------------------------------------
# Module-level session service and runner shared by iterate_spec_until_acceptance().
session_service = InMemorySessionService()
runner = Runner(
    agent=spec_refinement_loop,
    app_name="loop_refinement_app",
    session_service=session_service,
)
# ------------------------------------------------------------
# Public API: run the loop refinement for a topic
# ------------------------------------------------------------
async def iterate_spec_until_acceptance(
    user_id: str, topic: str, target_iterations: int = 3
) -> Dict[str, Any]:
    """Run the LoopAgent to iteratively refine a plan.

    Args:
        user_id: Stable identifier used to derive the session id.
        topic: Subject of the plan to refine.
        target_iterations: Loop budget; the loop may stop earlier if the
            session's 'accepted' flag is set externally.

    Returns:
        Dict with 'final_plan' (last plan_refiner output, falling back to
        the final response text), 'iterations', and 'stopped_reason'.
    """
    session_id = f"loop_refinement_{user_id}"

    async def _maybe_await(value):
        # Session services may be sync or async; normalize both call styles.
        return await value if inspect.isawaitable(value) else value

    # Create or get session (support both sync/async services)
    session = await _maybe_await(session_service.get_session(
        app_name="loop_refinement_app",
        user_id=user_id,
        session_id=session_id,
    ))
    if not session:
        session = await _maybe_await(session_service.create_session(
            app_name="loop_refinement_app",
            user_id=user_id,
            session_id=session_id,
            state={
                "topic": topic,
                "iteration": 0,
                "target_iterations": int(target_iterations),
                # Optionally, an external process or UI could set this to True to stop early
                "accepted": False,
            },
        ))
    else:
        # Refresh topic/target if user re-runs on UI.
        # NOTE(review): with InMemorySessionService this may mutate a copy of
        # the stored session rather than the canonical state — confirm.
        if hasattr(session, "state") and isinstance(session.state, dict):
            session.state["topic"] = topic
            session.state["target_iterations"] = int(target_iterations)

    # Seed message for LLM
    user_content = types.Content(
        role="user",
        parts=[
            types.Part(
                text=(
                    "Topic: "
                    + topic
                    + "\nPlease produce or refine a concise plan."
                )
            )
        ],
    )

    final_text = ""
    last_plan_text = ""

    def _note(event) -> None:
        """Record plan text / final response from one event.

        Shared by the async and sync stream branches below — the previous
        version duplicated (and subtly diverged in) this logic per branch.
        """
        nonlocal final_text, last_plan_text
        if not (event.content and getattr(event.content, "parts", None)):
            return
        for part in event.content.parts:
            if hasattr(part, "text") and part.text:
                # Keep last text from plan_refiner preferentially
                if getattr(event, "author", "") == plan_refiner.name:
                    last_plan_text = part.text
                if event.is_final_response():
                    final_text = part.text

    stream = runner.run_async(user_id=user_id, session_id=session_id, new_message=user_content)
    # Support both async generators and plain iterables
    if inspect.isasyncgen(stream):
        async for event in stream:
            _note(event)
    else:
        for event in stream:
            _note(event)

    # Re-fetch the session so post-run state is read: InMemorySessionService
    # returns copies, so the pre-run session object can be stale.
    refreshed = await _maybe_await(session_service.get_session(
        app_name="loop_refinement_app",
        user_id=user_id,
        session_id=session_id,
    ))
    if refreshed is not None:
        session = refreshed

    current_iteration = int(session.state.get("iteration", 0))
    reached = current_iteration >= int(session.state.get("target_iterations", 0))
    accepted = bool(session.state.get("accepted", False))
    return {
        "final_plan": last_plan_text or final_text,
        "iterations": current_iteration,
        "stopped_reason": "accepted" if accepted else ("target_iterations" if reached else "max_iterations_or_other"),
    }
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_2_loop_agent/agent.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_2_loop_agent/app.py | import streamlit as st
import asyncio
from agent import iterate_spec_until_acceptance
st.set_page_config(page_title="Loop Agent Demo", page_icon=":repeat:", layout="wide")
st.title("🔁 Iterative Plan Refiner with Gemini 3 Flash(Loop Agent)")
st.markdown(
"""
This demo runs a LoopAgent that repeatedly executes sub-agents to iteratively refine a plan.
Loop characteristics:
- Executes its sub-agents sequentially in a loop
- Terminates when the session's `accepted` flag is set or after the target iterations
- Shares the same session state across iterations, so counters/flags persist
"""
)
user_id = "demo_loop_user"
st.header("Run an iterative refinement")
topic = st.text_area(
"Topic",
value="AI-powered customer support platform launch plan",
height=100,
placeholder="What plan/topic should be refined iteratively?",
)
col_a, col_b = st.columns([1, 1])
with col_a:
target_iterations = st.number_input(
"Target iterations (early stop possible)", min_value=1, max_value=20, value=3, step=1
)
with col_b:
st.caption(
"Set a reasonable number of iterations. The loop may stop earlier if the session state flag `accepted` becomes True."
)
if st.button("Run Loop Refinement", type="primary"):
if topic.strip():
st.info("Refining plan in a loop…")
with st.spinner("Working…"):
try:
results = asyncio.run(
iterate_spec_until_acceptance(user_id, topic, int(target_iterations))
)
st.success("Loop finished")
st.subheader("Final Refined Plan")
st.write(results.get("final_plan", ""))
st.subheader("Run Metadata")
st.write({
"iterations": results.get("iterations"),
"stopped_reason": results.get("stopped_reason"),
})
except Exception as e:
st.error(f"Error: {e}")
else:
st.error("Please enter a topic")
with st.sidebar:
st.header("How it works")
st.markdown(
"""
- Uses `LoopAgent` with 3 sub-agents:
1) `plan_refiner` (LLM) refines the plan
2) `increment_iteration` updates the iteration counter in session state
3) `check_completion` escalates when done (accepted flag or target reached)
- The same `InvocationContext` and session state are reused every iteration
- The loop stops if `accepted` is True or the `target_iterations` is reached.
"""
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_2_loop_agent/app.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_3_parallel_agent/agent.py | from typing import Dict, Any
import inspect
from dotenv import load_dotenv
from google.adk.agents import LlmAgent, ParallelAgent
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
from google.genai import types
load_dotenv()
# Child agents write to distinct keys in session.state for UI consumption
# Research sub-agent: recent market trends for the session topic.
market_trends_agent = LlmAgent(
    name="market_trends_agent",
    model="gemini-3-flash-preview",
    description="Summarizes recent market trends for the topic",
    instruction=(
        "Summarize 3-5 recent market trends for the topic in session.state['topic'].\n"
        "Output a concise markdown list."
    ),
)

# Research sub-agent: notable competitors and their positioning.
competitor_intel_agent = LlmAgent(
    name="competitor_intel_agent",
    model="gemini-3-flash-preview",
    description="Identifies key competitors and positioning",
    instruction=(
        "List 3-5 notable competitors for session.state['topic'] and describe their positioning briefly."
    ),
)

# Research sub-agent: recent funding / partnership news digest.
funding_news_agent = LlmAgent(
    name="funding_news_agent",
    model="gemini-3-flash-preview",
    description="Reports funding/partnership news",
    instruction=(
        "Provide a short digest (bulleted) of recent funding or partnership news related to session.state['topic']."
    ),
)
# Parallel orchestrator
# ParallelAgent orchestrator: runs the three research agents concurrently;
# gather_market_snapshot() below collects their last outputs by author name.
market_snapshot_team = ParallelAgent(
    name="market_snapshot_team",
    description="Runs multiple research agents concurrently to produce a market snapshot",
    sub_agents=[
        market_trends_agent,
        competitor_intel_agent,
        funding_news_agent,
    ],
)
# Runner and session service
# Module-level session service and runner shared by gather_market_snapshot().
session_service = InMemorySessionService()
runner = Runner(agent=market_snapshot_team, app_name="parallel_snapshot_app", session_service=session_service)
async def gather_market_snapshot(user_id: str, topic: str) -> Dict[str, Any]:
    """Execute the parallel agents and return combined snapshot text blocks.

    Args:
        user_id: Stable identifier used to derive the session id.
        topic: Research topic placed into session state for the sub-agents.

    Returns:
        Dict with keys 'market_trends', 'competitors', 'funding_news' — each
        the last text emitted by the corresponding sub-agent ("" if none).
    """
    session_id = f"parallel_snapshot_{user_id}"

    async def _maybe_await(v):
        # Session services may be sync or async; normalize both call styles.
        return await v if inspect.isawaitable(v) else v

    session = await _maybe_await(
        session_service.get_session(
            app_name="parallel_snapshot_app", user_id=user_id, session_id=session_id
        )
    )
    if not session:
        session = await _maybe_await(
            session_service.create_session(
                app_name="parallel_snapshot_app",
                user_id=user_id,
                session_id=session_id,
                state={"topic": topic},
            )
        )
    else:
        # Refresh the topic on re-runs from the UI.
        if hasattr(session, "state") and isinstance(session.state, dict):
            session.state["topic"] = topic

    user_content = types.Content(
        role="user",
        parts=[types.Part(text=f"Topic: {topic}. Provide a concise snapshot per agent focus.")],
    )

    # Collect last text emitted per agent author.
    last_text_by_agent: Dict[str, str] = {}

    def _record(event) -> None:
        # Shared per-event handler so the async and sync branches below
        # cannot drift apart (they previously duplicated this logic inline).
        if getattr(event, "content", None) and getattr(event.content, "parts", None):
            for part in event.content.parts:
                if hasattr(part, "text") and part.text:
                    author = getattr(event, "author", "")
                    if author:
                        last_text_by_agent[author] = part.text

    stream = runner.run_async(user_id=user_id, session_id=session_id, new_message=user_content)
    # Support both async generators and plain iterables.
    if inspect.isasyncgen(stream):
        async for event in stream:
            _record(event)
    else:
        for event in stream:
            _record(event)

    return {
        "market_trends": last_text_by_agent.get(market_trends_agent.name, ""),
        "competitors": last_text_by_agent.get(competitor_intel_agent.name, ""),
        "funding_news": last_text_by_agent.get(funding_news_agent.name, ""),
    }
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_3_parallel_agent/agent.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_3_parallel_agent/app.py | import streamlit as st
import asyncio
from agent import market_snapshot_team, gather_market_snapshot
# Page chrome for the parallel-agent demo.
st.set_page_config(page_title="Parallel Agent Demo", page_icon=":fast_forward:", layout="wide")
st.title("⚡ Market Snapshot with Gemini 3 Flash(Parallel Agents)")
st.markdown(
    """
    This demo runs multiple research agents in parallel using a ParallelAgent:
    - Market trends analysis
    - Competitor intelligence
    - Funding and partnerships news
    Each sub-agent writes its results into a shared session.state under distinct keys. A subsequent step (or this UI) can read the combined snapshot.
    """
)
# Fixed demo identity; all runs share one session derived from this id.
user_id = "demo_parallel_user"
st.header("Run a market snapshot")
topic = st.text_input(
    "Research topic",
    value="AI-powered customer support platforms",
    placeholder="What market/topic do you want a quick parallel snapshot on?",
)
if st.button("Run Parallel Research", type="primary"):
    if topic.strip():
        st.info("Running parallel agents… market trends, competitors, and funding news")
        with st.spinner("Gathering snapshot…"):
            try:
                # asyncio.run is safe here: Streamlit script runs have no
                # active event loop of their own.
                results = asyncio.run(gather_market_snapshot(user_id, topic))
                st.success("Snapshot ready")
                # One column per sub-agent's output.
                col1, col2, col3 = st.columns(3)
                with col1:
                    st.subheader("Market Trends")
                    st.write(results.get("market_trends", ""))
                with col2:
                    st.subheader("Competitors")
                    st.write(results.get("competitors", ""))
                with col3:
                    st.subheader("Funding News")
                    st.write(results.get("funding_news", ""))
            except Exception as e:
                st.error(f"Error: {e}")
    else:
        st.error("Please enter a topic")
with st.sidebar:
    st.header("How it works")
    st.markdown(
        """
        - Uses `ParallelAgent` to execute sub-agents concurrently
        - Each child runs on its own invocation branch, but shares the same session.state
        - Distinct `output_key`s prevent overwrites in the shared state
        - This pattern is ideal for fan-out data gathering before synthesis
        """
    )
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_3_parallel_agent/app.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/gpt_oss_critique_improvement_loop/streamlit_app.py | """Streamlit Critique & Improvement Loop Demo using GPT-OSS via Groq
This implements the "Automatic Critique + Improvement Loop" pattern:
1. Generate initial answer (Pro Mode style)
2. Have a critic model identify flaws/missing pieces
3. Revise the answer addressing all critiques
4. Repeat if needed
Run with:
streamlit run streamlit_app.py
"""
import os
import time
import concurrent.futures as cf
from typing import List, Dict, Any
import streamlit as st
from groq import Groq, GroqError
# Groq-hosted GPT-OSS model used for every completion in this demo.
MODEL = "openai/gpt-oss-120b"
MAX_COMPLETION_TOKENS = 1024  # stay within Groq limits
# Example prompts surfaced by the "Random Sample Prompt" button.
SAMPLE_PROMPTS = [
    "Explain how to implement a binary search tree in Python.",
    "What are the best practices for API design?",
    "How would you optimize a slow database query?",
    "Explain the concept of recursion with examples.",
]
# --- Helper functions --------------------------------------------------------
def _one_completion(client: Groq, messages: List[Dict[str, str]], temperature: float) -> str:
    """Issue one non-streaming chat completion, retrying transient errors.

    Makes up to three attempts with exponential backoff (0.5s, then 1s
    between attempts) and re-raises the GroqError from the final failure.
    """
    backoff = 0.5
    attempts_left = 3
    while True:
        attempts_left -= 1
        try:
            response = client.chat.completions.create(
                model=MODEL,
                messages=messages,
                temperature=temperature,
                max_completion_tokens=MAX_COMPLETION_TOKENS,
                top_p=1,
                stream=False,
            )
        except GroqError:
            if attempts_left == 0:
                raise
            time.sleep(backoff)
            backoff *= 2
        else:
            return response.choices[0].message.content
def generate_initial_answer(client: Groq, prompt: str) -> str:
    """Generate an initial answer Pro-Mode style.

    Samples three diverse candidates in parallel (temperature 0.9), then asks
    the model to synthesize them into a single answer (temperature 0.2).
    Candidates are gathered in completion order, so their numbering in the
    synthesis prompt is not deterministic.
    """
    user_msg = [{"role": "user", "content": prompt}]
    with cf.ThreadPoolExecutor(max_workers=3) as pool:
        pending = [pool.submit(_one_completion, client, user_msg, 0.9) for _ in range(3)]
        candidates = [done.result() for done in cf.as_completed(pending)]

    labeled = [f"--- Candidate {idx + 1} ---\n{text}" for idx, text in enumerate(candidates)]
    synthesis_prompt = (
        f"You are given 3 candidate answers. Synthesize them into ONE best answer, "
        f"eliminating repetition and ensuring coherence:\n\n"
        f"{chr(10).join(labeled)}\n\n"
        f"Return the single best final answer."
    )
    return _one_completion(client, [{"role": "user", "content": synthesis_prompt}], 0.2)
def critique_answer(client: Groq, prompt: str, answer: str) -> str:
    """Ask the model to act as a critical reviewer of *answer*.

    Returns a bulleted list (lines starting with '•') of flaws, gaps,
    unclear passages, and improvement areas.
    """
    reviewer_request = (
        f"Original question: {prompt}\n\n"
        f"Answer to critique:\n{answer}\n\n"
        f"Act as a critical reviewer. List specific flaws, missing information, "
        f"unclear explanations, or areas that need improvement. Be constructive but thorough. "
        f"Format as a bulleted list starting with '•'."
    )
    messages = [{"role": "user", "content": reviewer_request}]
    return _one_completion(client, messages, 0.3)
def revise_answer(client: Groq, prompt: str, original_answer: str, critiques: str) -> str:
    """Produce a revised answer that addresses every critique point while
    keeping the good parts of the original answer."""
    instructions = (
        f"Original question: {prompt}\n\n"
        f"Original answer:\n{original_answer}\n\n"
        f"Critiques to address:\n{critiques}\n\n"
        f"Revise the original answer to address every critique point. "
        f"Maintain the good parts, fix the issues, and add missing information. "
        f"Return the improved answer."
    )
    messages = [{"role": "user", "content": instructions}]
    return _one_completion(client, messages, 0.2)
def critique_improvement_loop(prompt: str, max_iterations: int = 2, groq_api_key: str | None = None) -> Dict[str, Any]:
    """Run the full critique-and-improvement loop for *prompt*.

    Generates an initial Pro-Mode answer, then alternates critique and
    revision for ``max_iterations`` rounds.

    Returns:
        Dict with 'iterations' (full history, oldest first), 'final_answer',
        and 'total_iterations' (history length, including the initial answer).
    """
    client = Groq(api_key=groq_api_key) if groq_api_key else Groq()
    history: List[Dict[str, Any]] = []

    # Round 0: initial answer (no critiques attached).
    with st.spinner("Generating initial answer..."):
        current_answer = generate_initial_answer(client, prompt)
        history.append({"type": "initial", "answer": current_answer, "critiques": None})

    # Rounds 1..max_iterations: critique, then revise.
    for round_idx in range(1, max_iterations + 1):
        with st.spinner(f"Critiquing iteration {round_idx}..."):
            critiques = critique_answer(client, prompt, current_answer)
        with st.spinner(f"Revising iteration {round_idx}..."):
            current_answer = revise_answer(client, prompt, current_answer, critiques)
            history.append({"type": "improvement", "answer": current_answer, "critiques": critiques})

    return {
        "iterations": history,
        "final_answer": current_answer,
        "total_iterations": len(history),
    }
# --- Streamlit UI ------------------------------------------------------------
st.set_page_config(page_title="Critique & Improvement Loop", page_icon="🔄", layout="wide")
st.title("🔄 Critique & Improvement Loop")
st.markdown(
    "Generate high-quality answers through iterative critique and improvement using GPT-OSS."
)
# Sidebar: credentials and loop depth.
with st.sidebar:
    st.header("Settings")
    api_key = st.text_input("Groq API Key", value=os.getenv("GROQ_API_KEY", ""), type="password")
    max_iterations = st.slider("Max Improvement Iterations", 1, 3, 2)
    st.markdown("---")
    st.caption("Each iteration adds critique + revision steps for higher quality.")
# Initialize prompt in session state if not present
if "prompt" not in st.session_state:
    st.session_state["prompt"] = ""
def random_prompt_callback():
    # Button callback: runs before the rerun, so writing to the widget's
    # session-state key here is the supported way to prefill the text area.
    import random
    st.session_state["prompt"] = random.choice(SAMPLE_PROMPTS)
prompt = st.text_area("Your prompt", height=150, placeholder="Ask me anything…", key="prompt")
col1, col2 = st.columns([1, 1])
with col1:
    st.button("🔄 Random Sample Prompt", on_click=random_prompt_callback)
with col2:
    generate_clicked = st.button("🚀 Start Critique Loop")
if generate_clicked:
    if not prompt.strip():
        st.error("Please enter a prompt.")
        st.stop()
    try:
        results = critique_improvement_loop(prompt, max_iterations, groq_api_key=api_key or None)
    except Exception as e:
        # Surface the full traceback in the UI and halt this run.
        st.exception(e)
        st.stop()
    # Display results
    st.subheader("🎯 Final Answer")
    st.write(results["final_answer"])
    # Show improvement history
    with st.expander(f"📋 Show Improvement History ({results['total_iterations']} iterations)"):
        for i, iteration in enumerate(results["iterations"]):
            if iteration["type"] == "initial":
                st.markdown(f"### 🚀 Initial Answer")
                st.write(iteration["answer"])
            else:
                st.markdown(f"### 🔍 Iteration {i}")
                # Show critiques
                if iteration["critiques"]:
                    st.markdown("**Critiques:**")
                    st.write(iteration["critiques"])
                # Show improved answer
                st.markdown("**Improved Answer:**")
                st.write(iteration["answer"])
            if i < len(results["iterations"]) - 1:
                st.markdown("---")
    # Summary metrics
    st.markdown("---")
    col1, col2, col3 = st.columns(3)
    with col1:
        st.metric("Total Iterations", results["total_iterations"])
    with col2:
        st.metric("Improvement Rounds", max_iterations)
    with col3:
st.metric("Final Answer Length", len(results["final_answer"])) | {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_llm_apps/gpt_oss_critique_improvement_loop/streamlit_app.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/7_plugins/agent.py | import asyncio
from datetime import datetime
from typing import Optional, Dict, Any
from google.adk.agents import LlmAgent
from google.adk.agents.base_agent import BaseAgent
from google.adk.agents.callback_context import CallbackContext
from google.adk.plugins.base_plugin import BasePlugin
from google.adk.runners import InMemoryRunner
from google.adk.tools.base_tool import BaseTool
from google.adk.tools.tool_context import ToolContext
from google.genai import types
from dotenv import load_dotenv
# Load environment variables (API key)
load_dotenv()
# ============================================================================
# PLUGIN DEFINITION
# ============================================================================
# Plugins extend BasePlugin and provide global callbacks across all agents/tools
class SimplePlugin(BasePlugin):
    """Global plugin that logs lifecycle events and counts agent/tool runs.

    Registered once on the runner, its callbacks wrap every agent and tool
    invocation executed through that runner.
    """

    def __init__(self) -> None:
        super().__init__(name="simple_plugin")
        # Usage counters shared across every run handled by this plugin.
        self.agent_count = 0
        self.tool_count = 0

    async def on_user_message_callback(self, *, invocation_context, user_message: types.Content) -> Optional[types.Content]:
        """Prefix every text part of the incoming user message with a timestamp."""
        timestamp = datetime.now().strftime("%H:%M:%S")
        print(f"🔍 [Plugin] User message at {timestamp}")
        stamped = []
        for part in user_message.parts:
            if hasattr(part, 'text'):
                stamped.append(types.Part(text=f"[{timestamp}] {part.text}"))
        # Returning new Content replaces the original user message.
        return types.Content(role='user', parts=stamped)

    async def before_agent_callback(self, *, agent: BaseAgent, callback_context: CallbackContext) -> None:
        """Count and log each agent execution."""
        self.agent_count += 1
        print(f"🤖 [Plugin] Agent {agent.name} starting (count: {self.agent_count})")

    async def before_tool_callback(self, *, tool: BaseTool, tool_args: Dict[str, Any], tool_context: ToolContext) -> None:
        """Count and log each tool invocation."""
        self.tool_count += 1
        print(f"🔧 [Plugin] Tool {tool.name} starting (count: {self.tool_count})")

    async def after_run_callback(self, *, invocation_context) -> None:
        """Emit a usage summary once the whole run finishes."""
        print(f"📊 [Plugin] Final Report: {self.agent_count} agents, {self.tool_count} tools")
# ============================================================================
# TOOL DEFINITION
# ============================================================================
# This tool can fail (division by zero) to demonstrate error handling
async def calculator_tool(tool_context: ToolContext, operation: str, a: float, b: float) -> Dict[str, Any]:
    """Apply a basic arithmetic *operation* to ``a`` and ``b``.

    Raises ValueError on division by zero (checked first) or on an
    unrecognized operation name. Returns the inputs plus the computed result.
    """
    print(f"🔧 [Tool] Calculator: {operation}({a}, {b})")
    if operation == "divide" and b == 0:
        raise ValueError("Division by zero is not allowed")
    if operation == "add":
        result = a + b
    elif operation == "subtract":
        result = a - b
    elif operation == "multiply":
        result = a * b
    elif operation == "divide":
        result = a / b
    else:
        raise ValueError(f"Unknown operation: {operation}")
    return {"operation": operation, "a": a, "b": b, "result": result}
# ============================================================================
# AGENT AND RUNNER SETUP
# ============================================================================
# Create agent with the calculator tool
agent = LlmAgent(name="plugin_demo_agent", model="gemini-3-flash-preview",
    instruction="You are a helpful assistant that can perform calculations. Use the calculator_tool when needed.",
    tools=[calculator_tool])
# Create runner and register the plugin - this makes the plugin global
# InMemoryRunner bundles an in-memory session service; SimplePlugin's
# callbacks now wrap every agent and tool step routed through this runner.
runner = InMemoryRunner(agent=agent, app_name="plugin_demo_app", plugins=[SimplePlugin()])
# ============================================================================
# AGENT EXECUTION FUNCTION
# ============================================================================
async def run_agent(message: str) -> str:
    """Send *message* through the plugin-enabled runner and return reply text.

    Reuses (or lazily creates) a fixed demo session so conversation state
    persists across calls within the same process. Plugin callbacks fire
    automatically around each agent/tool step.
    """
    user_id = "demo_user"
    session_id = "demo_session"

    # Get or create session (required for ADK).
    sessions = runner.session_service
    existing = await sessions.get_session(app_name="plugin_demo_app", user_id=user_id, session_id=session_id)
    if not existing:
        await sessions.create_session(app_name="plugin_demo_app", user_id=user_id, session_id=session_id)

    content = types.Content(role='user', parts=[types.Part(text=message)])

    # Concatenate every text part emitted during the run.
    pieces = []
    async for event in runner.run_async(user_id=user_id, session_id=session_id, new_message=content):
        if event.content and event.content.parts:
            pieces.extend(part.text for part in event.content.parts if hasattr(part, 'text') and part.text)

    reply = "".join(pieces)
    return reply if reply else "No response received from agent."
# ============================================================================
# MAIN EXECUTION
# ============================================================================
if __name__ == "__main__":
    # Test the plugin functionality and print the agent's reply.
    # (Previously the reply was computed and silently discarded, so the demo
    # only showed the plugin's log lines, never the answer itself.)
    print(asyncio.run(run_agent("what is 2 + 2?")))
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/7_plugins/agent.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/7_plugins/app.py | import streamlit as st
import asyncio
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent))
from agent import run_agent
st.set_page_config(page_title="Google ADK Plugins Tutorial", page_icon="🔌")
st.title("🔌 Google ADK Plugins Tutorial")
st.markdown("Demonstrates plugins for cross-cutting concerns like logging and monitoring.")
# Canned inputs; "Error Handling" deliberately triggers the calculator tool's
# divide-by-zero ValueError so plugin/error behavior can be observed.
test_scenarios = {
    "Normal Conversation": "Hello! How are you?",
    "Simple Calculation": "Calculate 15 + 27",
    "Error Handling": "What is 10 divided by 0?"
}
selected_scenario = st.selectbox("Choose a test scenario:", list(test_scenarios.keys()))
if st.button("🚀 Run Test"):
    with st.spinner("Running..."):
        try:
            # asyncio.run is fine here: Streamlit scripts run without an
            # active event loop.
            response = asyncio.run(run_agent(test_scenarios[selected_scenario]))
            st.success("**Agent Response:**")
            st.write(response)
        except Exception as e:
            st.error(f"Error: {str(e)}")
st.markdown("---")
# Free-form input path; same execution plumbing as the canned scenarios.
custom_message = st.text_area("Or enter your own message:", placeholder="Type here...")
if st.button("🚀 Run Custom Message"):
    if custom_message.strip():
        with st.spinner("Processing..."):
            try:
                response = asyncio.run(run_agent(custom_message))
                st.success("**Agent Response:**")
                st.write(response)
            except Exception as e:
                st.error(f"Error: {str(e)}")
    else:
        st.warning("Please enter a message.")
with st.expander("📚 About Plugins"):
    st.markdown("""
    **Plugins** are custom code modules that execute at various stages of agent workflow lifecycle.
    **Key Features:**
    - 🔍 Request logging and modification
    - 🤖 Agent execution tracking
    - 🔧 Tool usage monitoring
    - 📊 Final reporting and analytics
    **Plugin Callbacks:**
    - `on_user_message_callback()` - Modify user input
    - `before_agent_callback()` - Track agent starts
    - `before_tool_callback()` - Track tool usage
    - `after_run_callback()` - Generate reports
    """)
st.markdown("---")
st.markdown("*Part of the Google ADK Crash Course*")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/7_plugins/app.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/8_simple_multi_agent/multi_agent_researcher/agent.py | from google.adk.agents import LlmAgent
from google.adk.tools.agent_tool import AgentTool
from google.adk.tools import google_search
# --- Sub-agents ---
# Research specialist: gathers facts and an outline; may call google_search.
# Wrapped as an AgentTool on the coordinator (see root_agent below).
research_agent = LlmAgent(
    name="research_agent",
    model="gemini-3-flash-preview",
    description="Finds key information and outlines for a given topic.",
    instruction=(
        "You are a focused research specialist. Given a user topic or goal, "
        "conduct thorough research and produce:\n"
        "1. A comprehensive bullet list of key facts and findings\n"
        "2. Relevant sources and references (when available)\n"
        "3. A structured outline for approaching the topic\n"
        "4. Current trends or recent developments\n\n"
        "Keep your research factual, well-organized, and comprehensive. "
        "Use the google_search tool to find current information when needed."
    ),
    tools=[google_search]
)
# Condenses research output into an executive summary plus key bullets.
summarizer_agent = LlmAgent(
    name="summarizer_agent",
    model="gemini-3-flash-preview",
    description="Summarizes research findings clearly and concisely.",
    instruction=(
        "You are a skilled summarizer. Given research findings, create:\n"
        "1. A concise executive summary (2-3 sentences)\n"
        "2. 5-7 key bullet points highlighting the most important information\n"
        "3. A clear takeaway message\n"
        "4. Any critical insights or patterns you notice\n\n"
        "Focus on clarity, relevance, and actionable insights. "
        "Avoid repetition and maintain the logical flow of information."
    ),
)
# Reviews research + summary: gap analysis, risks, quality score, next steps.
critic_agent = LlmAgent(
    name="critic_agent",
    model="gemini-3-flash-preview",
    description="Provides constructive critique and improvement suggestions.",
    instruction=(
        "You are a thoughtful analyst and critic. Given research and summaries, provide:\n"
        "1. **Gap Analysis**: Identify missing information or areas that need more research\n"
        "2. **Risk Assessment**: Highlight potential risks, limitations, or biases\n"
        "3. **Opportunity Identification**: Suggest areas for further exploration or improvement\n"
        "4. **Quality Score**: Rate the overall research quality (1-10) with justification\n"
        "5. **Actionable Recommendations**: Provide specific next steps or improvements\n\n"
        "Be constructive, thorough, and evidence-based in your analysis."
    ),
)
# --- Coordinator (root) agent ---
root_agent = LlmAgent(
name="multi_agent_researcher",
model="gemini-3-flash-preview",
description="Advanced multi-agent research coordinator that orchestrates research, analysis, and critique.",
instruction=(
"You are an advanced research coordinator managing a team of specialized agents.\n\n"
"**Your Research Team:**\n"
"- **research_agent**: Conducts comprehensive research using web search and analysis\n"
"- **summarizer_agent**: Synthesizes findings into clear, actionable insights\n"
"- **critic_agent**: Provides quality analysis, gap identification, and recommendations\n\n"
"**Research Workflow:**\n"
"1. **Research Phase**: Delegate to research_agent to gather comprehensive information\n"
"2. **Synthesis Phase**: Use summarizer_agent to distill findings into key insights\n"
"3. **Analysis Phase**: Engage critic_agent to evaluate quality and identify opportunities\n"
"4. **Integration**: Combine all outputs into a cohesive research report\n\n"
"**For Each Research Request:**\n"
"- Always start with research_agent to gather information\n"
"- Then use summarizer_agent to create clear summaries\n"
"- Finally, engage critic_agent for quality analysis and recommendations\n"
"- Present the final integrated research report to the user\n\n"
"**Output Format:**\n"
"Provide a structured response that includes:\n"
"- Executive Summary\n"
"- Key Findings\n"
"- Critical Analysis\n"
"- Recommendations\n"
"- Next Steps\n\n"
"Coordinate your team effectively to deliver high-quality, comprehensive research."
),
sub_agents=[summarizer_agent, critic_agent],
tools=[AgentTool(research_agent)]
) | {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/8_simple_multi_agent/multi_agent_researcher/agent.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_1_sequential_agent/agent.py | import os
import asyncio
import inspect
from dotenv import load_dotenv
from google.adk.agents import LlmAgent, SequentialAgent
from google.adk.tools import google_search
from google.adk.tools.agent_tool import AgentTool
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
from google.genai import types
# Load environment variables
load_dotenv()
# --- Search Agent (Wrapped as AgentTool) ---
# Search specialist wrapped as an AgentTool for market_researcher; only this
# agent carries the google_search tool.
search_agent = LlmAgent(
    name="search_agent",
    model="gemini-3-flash-preview",
    description="Conducts web search for current market information and competitive analysis",
    instruction=(
        "You are a web search specialist. When given a business topic:\n"
        "1. Use web search to find current market information\n"
        "2. Identify key competitors and their market position\n"
        "3. Gather recent industry trends and market data\n"
        "4. Find market size estimates and growth projections\n"
        "5. Provide comprehensive, up-to-date market analysis\n\n"
        "Always use web search to get the most current information available."
    ),
    tools=[google_search]
)
# --- Simple Sub-agents ---
# Step 1 of the pipeline: market research (delegates searches to search_agent).
market_researcher = LlmAgent(
    name="market_researcher",
    model="gemini-3-flash-preview",
    description="Conducts market research and competitive analysis using search capabilities",
    instruction=(
        "You are a market research specialist. Given a business topic:\n"
        "1. Use the search_agent to gather current market information\n"
        "2. Identify key competitors and their market position\n"
        "3. Analyze current market trends and opportunities\n"
        "4. Provide industry insights and market size estimates\n"
        "5. Synthesize search results into comprehensive market analysis\n\n"
        "Provide a comprehensive analysis in clear, structured format based on current web research."
    ),
    tools=[AgentTool(search_agent)]
)
# Step 2: SWOT analysis built on the researcher's output.
swot_analyzer = LlmAgent(
    name="swot_analyzer",
    model="gemini-3-flash-preview",
    description="Performs SWOT analysis based on market research",
    instruction=(
        "You are a strategic analyst. Given market research findings:\n"
        "1. Identify internal strengths and competitive advantages\n"
        "2. Assess internal weaknesses and limitations\n"
        "3. Identify external opportunities in the market\n"
        "4. Evaluate external threats and challenges\n\n"
        "Provide a clear SWOT analysis with actionable insights."
    )
)
# Step 3: strategy objectives, actions, timeline, and KPIs.
strategy_formulator = LlmAgent(
    name="strategy_formulator",
    model="gemini-3-flash-preview",
    description="Develops strategic objectives and action plans",
    instruction=(
        "You are a strategic planner. Given SWOT analysis results:\n"
        "1. Define 3-5 key strategic objectives\n"
        "2. Create specific action items for each objective\n"
        "3. Recommend realistic timeline for implementation\n"
        "4. Define success metrics and KPIs to track\n\n"
        "Provide a clear strategic plan with actionable steps."
    )
)
# Step 4: resources, milestones, risks, and final recommendations.
implementation_planner = LlmAgent(
    name="implementation_planner",
    model="gemini-3-flash-preview",
    description="Creates detailed implementation roadmap",
    instruction=(
        "You are an implementation specialist. Given the strategy plan:\n"
        "1. Identify required resources (human, financial, technical)\n"
        "2. Define key milestones and checkpoints\n"
        "3. Develop risk mitigation strategies\n"
        "4. Provide final recommendations with confidence level\n\n"
        "Create a practical implementation roadmap."
    )
)
# --- Sequential Agent (Pure Sequential Pattern) ---
# Each sub-agent runs in order and sees the accumulated conversation/state.
business_intelligence_team = SequentialAgent(
    name="business_intelligence_team",
    description="Sequentially processes business intelligence through research, analysis, strategy, and planning",
    sub_agents=[
        market_researcher,       # Step 1: Market research (with search capabilities)
        swot_analyzer,           # Step 2: SWOT analysis
        strategy_formulator,     # Step 3: Strategy development
        implementation_planner   # Step 4: Implementation planning
    ]
)
# --- Runner Setup for Execution ---
# In-memory session service: sessions/state last only for the process lifetime.
session_service = InMemorySessionService()
runner = Runner(
    agent=business_intelligence_team,
    app_name="business_intelligence",
    session_service=session_service
)
# --- Simple Execution Function ---
async def analyze_business_intelligence(user_id: str, business_topic: str) -> str:
    """Process business intelligence through the sequential pipeline.

    Args:
        user_id: Identifier used to derive a stable per-user session id.
        business_topic: Topic injected into session state and the user message.

    Returns:
        The text of the last final response produced by the pipeline
        (i.e. the last sub-agent's output), or "" if none was produced.
    """
    session_id = f"bi_session_{user_id}"

    # Support both sync and async session service
    async def _maybe_await(value):
        return await value if inspect.isawaitable(value) else value

    session = await _maybe_await(session_service.get_session(
        app_name="business_intelligence",
        user_id=user_id,
        session_id=session_id
    ))
    if not session:
        session = await _maybe_await(session_service.create_session(
            app_name="business_intelligence",
            user_id=user_id,
            session_id=session_id,
            state={"business_topic": business_topic, "conversation_history": []}
        ))

    # Create user content
    user_content = types.Content(
        role='user',
        parts=[types.Part(text=f"Please analyze this business topic: {business_topic}")]
    )

    def _final_text(event) -> str | None:
        # Shared by both consumption paths below (previously duplicated, and
        # only the sync branch guarded against events lacking
        # is_final_response()). Also skips None text so the function cannot
        # return None despite its -> str annotation.
        is_final = getattr(event, "is_final_response", lambda: False)
        if is_final() and event.content and event.content.parts:
            return event.content.parts[0].text
        return None

    # Run the sequential pipeline (support async or sync stream); keep the
    # last final response seen — that is the implementation planner's output.
    response_text = ""
    stream = runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=user_content
    )
    if inspect.isasyncgen(stream):
        async for event in stream:
            text = _final_text(event)
            if text is not None:
                response_text = text
    else:
        for event in stream:
            text = _final_text(event)
            if text is not None:
                response_text = text
    return response_text
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_1_sequential_agent/agent.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_1_sequential_agent/app.py | import streamlit as st
import asyncio
from agent import business_intelligence_team, analyze_business_intelligence
# Page configuration
st.set_page_config(
page_title="Sequential Agent Demo",
page_icon=":arrow_right:",
layout="wide"
)
# Title and description
st.title("🚀 Business Implementation Plan Generator Agent")
st.markdown("""
This **Business Implementation Plan Generator Agent** analyzes business opportunities through a comprehensive 4-step process:
1. **🔍 Market Analysis** - Researches market, competitors, and trends using web search
2. **📊 SWOT Analysis** - Identifies strengths, weaknesses, opportunities, and threats
3. **🎯 Strategy Development** - Creates strategic objectives and action plans
4. **📋 Implementation Planning** - Generates detailed business implementation roadmap
**Result**: A complete business implementation plan ready for execution.
""")
# This is a placeholder user_id for demo purposes.
# In a real app, you might use authentication or session state to set this.
user_id = "demo_user"
# Sample business topics
sample_topics = [
    "Electric vehicle charging stations in urban areas",
    "AI-powered healthcare diagnostics",
    "Sustainable food delivery services",
    "Remote work collaboration tools",
    "Renewable energy storage solutions"
]
# Main content
st.header("Generate Your Business Implementation Plan")
# Topic input
business_topic = st.text_area(
    "Enter a business opportunity to analyze:",
    value=sample_topics[0],
    height=100,
    placeholder="Describe a business opportunity, industry, or market you'd like to analyze for implementation planning..."
)
# Sample topics
st.subheader("Or choose from sample business opportunities:")
cols = st.columns(len(sample_topics))
for i, topic in enumerate(sample_topics):
    if cols[i].button(topic, key=f"topic_{i}"):
        # NOTE(review): this assignment appears to be lost on st.rerun()
        # because business_topic is not stored in st.session_state — after the
        # rerun the text area shows its previous value. Confirm, and consider
        # wiring the widget through session state instead.
        business_topic = topic
        st.rerun()
# Analysis button
if st.button("🚀 Generate Business Implementation Plan", type="primary"):
    if business_topic.strip():
        st.info("🚀 Starting business analysis... This will research the market, perform SWOT analysis, develop strategy, and create an implementation plan.")
        # Display the workflow
        st.subheader("Business Analysis Workflow")
        col1, col2, col3, col4 = st.columns(4)
        with col1:
            st.markdown("**1. Market Analysis**")
            st.markdown("🔍 Web search + competitive research")
        with col2:
            st.markdown("**2. SWOT Analysis**")
            st.markdown("📊 Strengths, Weaknesses, Opportunities, Threats")
        with col3:
            st.markdown("**3. Strategy Development**")
            st.markdown("🎯 Strategic objectives and action plans")
        with col4:
            st.markdown("**4. Implementation Planning**")
            st.markdown("📋 Detailed roadmap and execution plan")
        # Run the actual analysis
        # asyncio.run is safe here: Streamlit scripts run without an active
        # event loop of their own.
        with st.spinner("Generating comprehensive business implementation plan..."):
            try:
                result = asyncio.run(analyze_business_intelligence(user_id, business_topic))
                st.success("✅ Business Implementation Plan Generated!")
                st.subheader("Your Business Implementation Plan")
                st.markdown(result)
            except Exception as e:
                st.error(f"❌ Error during analysis: {str(e)}")
                st.info("Make sure you have set up your GOOGLE_API_KEY in the .env file")
    else:
        st.error("Please enter a business opportunity to analyze.")
# How it works (in sidebar)
with st.sidebar:
st.header("How It Works")
st.markdown("""
The **Business Implementation Plan Generator Agent** uses a sophisticated sequential workflow to create comprehensive business plans:
1. **🔍 Market Analysis Agent**: Uses web search to research current market conditions, competitors, and trends
2. **📊 SWOT Analysis Agent**: Analyzes the market research to identify strategic insights
3. **🎯 Strategy Development Agent**: Creates strategic objectives and action plans based on SWOT analysis
4. **📋 Implementation Planning Agent**: Develops detailed execution roadmaps and resource requirements
**Key Innovation**: The Market Analysis Agent has access to a specialized Search Agent (wrapped as AgentTool) that can perform real-time web searches for current market intelligence.
Each agent builds upon the previous agent's output, creating a comprehensive business implementation plan ready for execution.
""")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_1_sequential_agent/app.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:rag_tutorials/agentic_rag_gpt5/agentic_rag_gpt5.py | import streamlit as st
import os
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.vectordb.lancedb import LanceDb, SearchType
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Page configuration
# NOTE: per Streamlit's API, set_page_config should be the first st.* call.
st.set_page_config(
    page_title="Agentic RAG with GPT-5",
    page_icon="🧠",
    layout="wide"
)

# Main title and description
st.title("🧠 Agentic RAG with GPT-5")
st.markdown("""
This app demonstrates an intelligent AI agent that:
1. **Retrieves** relevant information from knowledge sources using LanceDB
2. **Answers** your questions clearly and concisely
Enter your OpenAI API key in the sidebar to get started!
""")

# Sidebar for API key and settings
with st.sidebar:
    st.header("🔧 Configuration")

    # OpenAI API Key — pre-filled from the OPENAI_API_KEY env var when set.
    openai_key = st.text_input(
        "OpenAI API Key",
        type="password",
        value=os.getenv("OPENAI_API_KEY", ""),
        help="Get your key from https://platform.openai.com/"
    )

    # Add URLs to knowledge base
    st.subheader("🌐 Add Knowledge Sources")
    new_url = st.text_input(
        "Add URL",
        placeholder="https://www.theunwindai.com/p/mcp-vs-a2a-complementing-or-supplementing",
        help="Enter a URL to add to the knowledge base"
    )
    # Queue the URL in session state; the main body ingests it later and reruns.
    if st.button("➕ Add URL", type="primary"):
        if new_url:
            st.session_state.urls_to_add = new_url
            st.success(f"URL added to queue: {new_url}")
        else:
            st.error("Please enter a URL")

# Check if API key is provided — the whole app body is gated on the key.
if openai_key:
    # Initialize URLs in session state
    if 'knowledge_urls' not in st.session_state:
        st.session_state.knowledge_urls = ["https://www.theunwindai.com/p/mcp-vs-a2a-complementing-or-supplementing"]  # Default URL
    # Track which URLs were already ingested so reruns don't re-add them.
    if 'urls_loaded' not in st.session_state:
        st.session_state.urls_loaded = set()
@st.cache_resource(show_spinner="📚 Loading knowledge base...")
def load_knowledge() -> Knowledge:
"""Load and initialize the knowledge base with LanceDB"""
kb = Knowledge(
vector_db=LanceDb(
uri="tmp/lancedb",
table_name="agentic_rag_docs",
search_type=SearchType.vector, # Use vector search
embedder=OpenAIEmbedder(
api_key=openai_key
),
),
)
return kb
# Initialize agent (cached to avoid reloading)
@st.cache_resource(show_spinner="🤖 Loading agent...")
def load_agent(_kb: Knowledge) -> Agent:
"""Create an agent with reasoning capabilities"""
return Agent(
model=OpenAIChat(
id="gpt-5",
api_key=openai_key
),
knowledge=_kb,
search_knowledge=True, # Enable knowledge search
instructions=[
"Always search your knowledge before answering the question.",
"Provide clear, well-structured answers in markdown format.",
"Use proper markdown formatting with headers, lists, and emphasis where appropriate.",
"Structure your response with clear sections and bullet points when helpful.",
],
markdown=True, # Enable markdown formatting
)
    # Load knowledge and agent
    knowledge = load_knowledge()

    # Load initial URLs if any (only load once per URL — urls_loaded is the
    # session-scoped dedupe set initialized above).
    for url in st.session_state.knowledge_urls:
        if url not in st.session_state.urls_loaded:
            knowledge.add_content(url=url)
            st.session_state.urls_loaded.add(url)

    agent = load_agent(knowledge)

    # Display current URLs in knowledge base
    if st.session_state.knowledge_urls:
        st.sidebar.subheader("📚 Current Knowledge Sources")
        for i, url in enumerate(st.session_state.knowledge_urls, 1):
            st.sidebar.markdown(f"{i}. {url}")

    # Handle URL additions queued by the sidebar "Add URL" button.
    if hasattr(st.session_state, 'urls_to_add') and st.session_state.urls_to_add:
        new_url = st.session_state.urls_to_add
        if new_url not in st.session_state.knowledge_urls:
            st.session_state.knowledge_urls.append(new_url)
            with st.spinner("📥 Loading new documents..."):
                if new_url not in st.session_state.urls_loaded:
                    knowledge.add_content(url=new_url)
                    st.session_state.urls_loaded.add(new_url)
            st.success(f"✅ Added: {new_url}")
        # Clear the queue entry and rerun so the sidebar list refreshes.
        del st.session_state.urls_to_add
        st.rerun()

    # Main query section
    st.divider()
    st.subheader("🤔 Ask a Question")

    # Suggested prompts — each button pre-fills the query text area below.
    st.markdown("**Try these prompts:**")
    col1, col2, col3 = st.columns(3)
    with col1:
        if st.button("What is MCP?", use_container_width=True):
            st.session_state.query = "What is MCP (Model Context Protocol) and how does it work?"
    with col2:
        if st.button("MCP vs A2A", use_container_width=True):
            st.session_state.query = "How do MCP and A2A protocols differ, and are they complementary or competing?"
    with col3:
        if st.button("Agent Communication", use_container_width=True):
            st.session_state.query = "How do MCP and A2A work together in AI agent systems for communication and tool access?"

    # Query input
    query = st.text_area(
        "Your question:",
        value=st.session_state.get("query", "What is the difference between MCP and A2A protocols?"),
        height=100,
        help="Ask anything about the loaded knowledge sources"
    )

    # Run button
    if st.button("🚀 Get Answer", type="primary"):
        if query:
            # Create container for answer
            st.markdown("### 💡 Answer")
            answer_container = st.container()
            answer_placeholder = answer_container.empty()
            # Variables to accumulate content
            answer_text = ""
            # Stream the agent's response, re-rendering the placeholder as
            # each text chunk arrives.
            with st.spinner("🔍 Searching and generating answer..."):
                for chunk in agent.run(
                    query,
                    stream=True,  # Enable streaming
                ):
                    # Only string content chunks are accumulated; other event
                    # types in the stream are ignored.
                    if hasattr(chunk, 'content') and chunk.content and isinstance(chunk.content, str):
                        answer_text += chunk.content
                        answer_placeholder.markdown(
                            answer_text,
                            unsafe_allow_html=True
                        )
        else:
            st.error("Please enter a question")
else:
    # Show instructions if API key is missing
    st.info("""
👋 **Welcome! To use this app, you need:**
- **OpenAI API Key** (set it in the sidebar)
- Sign up at [platform.openai.com](https://platform.openai.com/)
- Generate a new API key
Once you enter the key, the app will load the knowledge base and agent.
""")
# Footer with explanation — static architecture notes, collapsed by default.
st.divider()
with st.expander("📖 How This Works"):
    st.markdown("""
**This app uses the Agno framework to create an intelligent Q&A system:**
1. **Knowledge Loading**: URLs are processed and stored in LanceDB vector database
2. **Vector Search**: Uses OpenAI's embeddings for semantic search to find relevant information
3. **GPT-5**: OpenAI's GPT-5 model processes the information and generates answers
**Key Components:**
- `Knowledge`: Manages document loading from URLs
- `LanceDb`: Vector database for efficient similarity search
- `OpenAIEmbedder`: Converts text to embeddings using OpenAI's embedding model
- `Agent`: Orchestrates everything to answer questions
**Why LanceDB?**
- Lightweight and easy to set up
- No external database required
- Fast vector search capabilities
- Perfect for prototyping and small to medium-scale applications
""")
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "rag_tutorials/agentic_rag_gpt5/agentic_rag_gpt5.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_1_agent_lifecycle_callbacks/agent.py | import os
import asyncio
from datetime import datetime
from typing import Optional
from google.adk.agents import LlmAgent
from google.adk.agents.callback_context import CallbackContext
from google.adk.runners import InMemoryRunner
from google.genai import types
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# --- 1. Define the Callback Functions ---
def before_agent_callback(callback_context: CallbackContext) -> Optional[types.Content]:
    """Log agent start and record the start time in session state.

    Returning None tells the ADK to continue with normal execution.
    """
    started = datetime.now()
    print(f"🚀 Agent {callback_context.agent_name} started at {started.strftime('%H:%M:%S')}")
    print(f"⏰ Start time: {started.strftime('%Y-%m-%d %H:%M:%S')}")
    print()

    # Stash the ISO start time so after_agent_callback can compute duration.
    snapshot = callback_context.state.to_dict()
    snapshot["start_time"] = started.isoformat()
    callback_context.state.update(snapshot)
    return None
def after_agent_callback(callback_context: CallbackContext) -> Optional[types.Content]:
    """Log completion and duration using the start time stored by the
    before-callback; silently does nothing if no start time was recorded."""
    name = callback_context.agent_name
    stored_start = callback_context.state.to_dict().get("start_time")
    if stored_start:
        finished = datetime.now()
        elapsed = (finished - datetime.fromisoformat(stored_start)).total_seconds()
        print(f"✅ Agent {name} completed")
        print(f"⏱️ Duration: {elapsed:.2f}s")
        print(f"⏰ End time: {finished.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"📊 Performance: {elapsed:.2f}s | {name}")
        print()
    return None
# --- 2. Setup Agent with Callbacks ---
llm_agent_with_callbacks = LlmAgent(
    name="agent_lifecycle_demo_agent",
    model="gemini-3-flash-preview",
    instruction="You are a helpful assistant. Respond to user questions clearly and concisely.",
    description="An LLM agent demonstrating lifecycle callbacks for monitoring",
    # Lifecycle hooks: invoked around each agent run to log timing.
    before_agent_callback=before_agent_callback,
    after_agent_callback=after_agent_callback
)
# --- 3. Setup Runner and Sessions ---
# InMemoryRunner bundles an in-memory session service with the agent.
runner = InMemoryRunner(agent=llm_agent_with_callbacks, app_name="agent_lifecycle_callback_demo")
async def run_agent(message: str) -> str:
    """Run the agent on *message* and return its final text response."""
    user_id = "demo_user"
    session_id = "demo_session"

    # Reuse (or lazily create) the demo session via the bundled service.
    session_service = runner.session_service
    session = await session_service.get_session(
        app_name="agent_lifecycle_callback_demo",
        user_id=user_id,
        session_id=session_id,
    )
    if not session:
        session = await session_service.create_session(
            app_name="agent_lifecycle_callback_demo",
            user_id=user_id,
            session_id=session_id,
            state={"conversation_history": []},
        )

    user_content = types.Content(role='user', parts=[types.Part(text=message)])

    # Drain the whole event stream (no early break) so after_agent_callback
    # still gets a chance to run; keep the last final response's text.
    response_text = ""
    async for event in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=user_content,
    ):
        if event.is_final_response() and event.content:
            response_text = event.content.parts[0].text.strip()
    return response_text
# --- 4. Execute ---
if __name__ == "__main__":
    print("\n" + "="*50 + " Agent Lifecycle Callbacks Demo " + "="*50)

    # Test messages
    test_messages = [
        "Hello, how are you?"
    ]

    async def test_agent():
        # Run each demo message through the agent; the lifecycle callbacks
        # print timing info to the console around each call.
        for i, message in enumerate(test_messages, 1):
            print(f"\n--- Test {i}: {message} ---")
            response = await run_agent(message)
            print(f"🤖 Response: {response}")

    asyncio.run(test_agent())
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_1_agent_lifecycle_callbacks/agent.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_1_agent_lifecycle_callbacks/app.py | #!/usr/bin/env python3
"""
Streamlit App for Agent Lifecycle Callbacks Demo
"""
import streamlit as st
import asyncio
from agent import llm_agent_with_callbacks, runner
from google.genai import types
# Page configuration
st.set_page_config(
    page_title="Agent Lifecycle Callbacks Demo",
    page_icon="🔄",
    layout="wide"
)

# Title and description
st.title("🔄 Agent Lifecycle Callbacks Demo")
st.markdown("""
This demo shows how to use `before_agent_callback` and `after_agent_callback` to monitor agent execution.
Watch the console output to see the callback timing information.
""")

# Sidebar — static documentation of what each callback logs.
with st.sidebar:
    st.header("📊 Callback Information")
    st.markdown("""
**Before Callback:**
- Records start time
- Logs agent execution start
**After Callback:**
- Calculates execution duration
- Logs completion time
""")
    st.header("🔧 Technical Details")
    st.markdown("""
- Uses `InMemoryRunner` for session management
- Callbacks receive `CallbackContext` with agent info
- State is shared between callbacks via session
""")

# Main chat interface
st.header("💬 Chat with Agent")
# Define the get_response function
async def get_response(prompt_text: str) -> str:
    """Run the lifecycle-callback agent on *prompt_text*; return its reply."""
    user_id = "demo_user"
    session_id = "demo_session"

    # Bundled in-memory session service from the runner.
    svc = runner.session_service
    session = await svc.get_session(
        app_name="agent_lifecycle_callback_demo",
        user_id=user_id,
        session_id=session_id,
    )
    if not session:
        session = await svc.create_session(
            app_name="agent_lifecycle_callback_demo",
            user_id=user_id,
            session_id=session_id,
        )

    user_content = types.Content(role='user', parts=[types.Part(text=prompt_text)])

    # Iterate the entire event stream — breaking early could skip the
    # after-agent callback — and keep the last final response's text.
    reply = ""
    async for event in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=user_content,
    ):
        if event.is_final_response() and event.content:
            reply = event.content.parts[0].text.strip()
    return reply
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages accumulated across reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input
if prompt := st.chat_input("Ask me anything..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Render the assistant turn with a placeholder while the agent runs.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        # Show loading message
        message_placeholder.markdown("🤔 Thinking...")
        # Get response (asyncio.run blocks until the async helper finishes)
        response = asyncio.run(get_response(prompt))
        # Update placeholder with response
        message_placeholder.markdown(response)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
# Quick test buttons
st.markdown("---")
st.header("⚡ Quick Tests")


def _run_quick_test(test_message: str) -> None:
    """Run a canned prompt through the agent and persist both chat turns.

    Fix: the original button handlers rendered the exchange but never
    appended it to st.session_state.messages, so quick-test conversations
    vanished on the next rerun — unlike the chat-input handler above.
    Also collapses three copy-pasted handlers into one helper.
    """
    st.session_state.messages.append({"role": "user", "content": test_message})
    with st.chat_message("user"):
        st.markdown(test_message)
    with st.chat_message("assistant"):
        with st.spinner("🤖 Agent is processing..."):
            response = asyncio.run(get_response(test_message))
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})


col1, col2, col3 = st.columns(3)
with col1:
    if st.button("👋 Greeting Test"):
        _run_quick_test("Hello, how are you?")
with col2:
    if st.button("🧮 Math Test"):
        _run_quick_test("What's 2 + 2?")
with col3:
    if st.button("😄 Joke Test"):
        _run_quick_test("Tell me a short joke")
# Clear chat button — wipes the persisted history and reruns the script.
if st.button("🗑️ Clear Chat"):
    st.session_state.messages = []
    st.rerun()

# Footer
st.markdown("---")
st.markdown("""
<div style='text-align: center; color: #666;'>
<p>Check the console/terminal for callback timing information</p>
</div>
""", unsafe_allow_html=True)
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_1_agent_lifecycle_callbacks/app.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_2_llm_interaction_callbacks/agent.py | #!/usr/bin/env python3
"""
LLM Interaction Callbacks Demo
Simple agent that demonstrates LLM request/response monitoring
"""
import os
from datetime import datetime
from typing import Optional
from google.adk.agents import LlmAgent
from google.adk.agents.callback_context import CallbackContext
from google.adk.runners import InMemoryRunner
from google.genai import types
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
def before_model_callback(callback_context: CallbackContext, llm_request) -> Optional[types.Content]:
    """Log an outgoing LLM request and stash timing/size info in state.

    Returning None lets the request proceed unmodified.
    """
    request_time = datetime.now()
    agent_name = callback_context.agent_name

    # Read attributes defensively — llm_request is framework-defined.
    model = getattr(llm_request, 'model', 'unknown')

    # First non-empty text part found anywhere in the request contents.
    prompt_text = "unknown"
    for content in (getattr(llm_request, 'contents', None) or []):
        texts = [p.text for p in (getattr(content, 'parts', None) or [])
                 if getattr(p, 'text', None)]
        if texts:
            prompt_text = texts[0]
            break

    print(f"🤖 LLM Request to {model}")
    print(f"⏰ Request time: {request_time.strftime('%H:%M:%S')}")
    print(f"📋 Agent: {agent_name}")
    print()

    # Persist request metadata so after_model_callback can compute duration.
    snapshot = callback_context.state.to_dict()
    snapshot["llm_request_time"] = request_time.isoformat()
    snapshot["llm_model"] = model
    snapshot["llm_prompt_length"] = len(prompt_text)
    callback_context.state.update(snapshot)

    return None
def after_model_callback(callback_context: CallbackContext, llm_response) -> Optional[types.Content]:
    """Callback after the LLM response is received.

    Logs the model name, round-trip duration, token usage, and a rough cost
    estimate. Returns None so the original response is used unchanged.

    Fix: removed two unused locals from the original (``agent_name`` and
    ``response_text``) — neither was referenced anywhere in the function.
    """
    current_state = callback_context.state.to_dict()
    model = current_state.get("llm_model", "unknown")

    # Total token count reported by the API's usage metadata, when available.
    tokens = 0
    if llm_response and hasattr(llm_response, 'usage_metadata') and llm_response.usage_metadata:
        tokens = getattr(llm_response.usage_metadata, 'total_token_count', 0)

    # Duration relative to the timestamp stored by before_model_callback.
    request_time_str = current_state.get("llm_request_time")
    if request_time_str:
        request_time = datetime.fromisoformat(request_time_str)
        duration_seconds = (datetime.now() - request_time).total_seconds()
    else:
        duration_seconds = 0

    print(f"📝 LLM Response from {model}")
    print(f"⏱️ Duration: {duration_seconds:.2f}s")
    print(f"🔢 Tokens: {tokens}")

    # Calculate estimated cost for Gemini 3 Flash.
    # NOTE(review): the $2.50/1M rate is quoted for *output* tokens, but
    # total_token_count includes prompt tokens too, so this is only a rough
    # upper-bound estimate — confirm against current Gemini pricing.
    cost_per_1k_output = 0.0025  # $2.50 per 1M = $0.0025 per 1K
    estimated_cost = (tokens / 1000) * cost_per_1k_output
    print(f"💰 Estimated cost: ${estimated_cost:.4f}")
    print()

    # Return None to use the original response
    return None
# Create agent with LLM callbacks
root_agent = LlmAgent(
    name="llm_monitor_agent",
    model="gemini-3-flash-preview",
    description="Agent with LLM interaction monitoring",
    instruction="""
You are a helpful assistant with LLM monitoring.
Your role is to:
- Provide clear, informative responses
- Keep responses concise but comprehensive
- Demonstrate the LLM callback system
The system will automatically track:
- Your requests to the LLM model
- Response times and token usage
- Estimated API costs
Focus on being helpful while showing the monitoring capabilities.
""",
    # Hooks fire around every model call made by this agent.
    before_model_callback=before_model_callback,
    after_model_callback=after_model_callback
)
# Create runner for agent execution.
# InMemoryRunner bundles an in-memory session service with the agent.
runner = InMemoryRunner(agent=root_agent, app_name="llm_monitor_app")
async def run_agent(message: str) -> str:
    """Send *message* through the runner and return the final response text."""
    user_id = "demo_user"
    session_id = "demo_session"

    # Fetch the demo session, creating it on first use.
    svc = runner.session_service
    session = await svc.get_session(
        app_name="llm_monitor_app",
        user_id=user_id,
        session_id=session_id,
    )
    if not session:
        session = await svc.create_session(
            app_name="llm_monitor_app",
            user_id=user_id,
            session_id=session_id,
            state={"conversation_history": []},
        )

    user_content = types.Content(role='user', parts=[types.Part(text=message)])

    # Consume the full event stream (no early break) so the after-model
    # callback always gets to run; keep the last final response's text.
    final_text = ""
    async for event in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=user_content,
    ):
        if event.is_final_response() and event.content:
            final_text = event.content.parts[0].text.strip()
    return final_text
if __name__ == "__main__":
    import asyncio

    # Test the agent
    print("🧪 Testing LLM Interaction Callbacks")
    print("=" * 50)

    test_messages = [
        "Explain quantum computing in simple terms",
        "Write a short poem about AI",
        "What are the benefits of renewable energy?"
    ]

    async def test_agent():
        # Run each prompt; the model callbacks print timing/cost per call.
        for message in test_messages:
            print(f"\n🤖 User: {message}")
            response = await run_agent(message)
            print(f"🤖 Agent: {response}")
            print("-" * 50)

    asyncio.run(test_agent())
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_2_llm_interaction_callbacks/agent.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_2_llm_interaction_callbacks/app.py | #!/usr/bin/env python3
"""
Streamlit App for LLM Interaction Callbacks Demo
"""
import streamlit as st
import sys
import os
import asyncio
from agent import run_agent
# Page configuration
st.set_page_config(
    page_title="LLM Interaction Callbacks",
    page_icon="🤖",
    layout="wide"
)

# Title and description
st.title("🤖 LLM Interaction Callbacks Demo")
st.markdown("""
This demo shows how to monitor LLM requests and responses using callbacks.
Watch the console output to see detailed LLM interaction tracking!
""")

# Sidebar with information — static documentation of the two callbacks.
with st.sidebar:
    st.header("📊 LLM Monitoring")
    st.markdown("""
**Request Callback**: Triggered when LLM request is sent
- Logs model name and prompt
- Records request timestamp
- Tracks prompt length
**Response Callback**: Triggered when LLM response is received
- Calculates response duration
- Tracks token usage
- Estimates API costs
""")
# Main chat interface
st.header("💬 Chat with LLM Monitor")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages accumulated across reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input
if prompt := st.chat_input("Ask me something..."):
    # Add user message to chat
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Get agent response (asyncio.run blocks until the agent finishes)
    with st.chat_message("assistant"):
        with st.spinner("🤖 LLM is processing..."):
            response = asyncio.run(run_agent(prompt))
            st.markdown(response)

    # Add assistant response to chat
    st.session_state.messages.append({"role": "assistant", "content": response})
# Quick test buttons
st.markdown("---")
st.header("⚡ Quick Tests")


def _run_quick_test(test_message: str) -> None:
    """Run a canned prompt through the agent and persist both chat turns.

    Fix: the original button handlers displayed the exchange but never
    appended it to st.session_state.messages, so it disappeared on the next
    rerun — unlike the chat-input handler above. Also collapses three
    copy-pasted handlers into one helper.
    """
    st.session_state.messages.append({"role": "user", "content": test_message})
    with st.chat_message("user"):
        st.markdown(test_message)
    with st.chat_message("assistant"):
        with st.spinner("🤖 LLM is processing..."):
            response = asyncio.run(run_agent(test_message))
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})


col1, col2, col3 = st.columns(3)
with col1:
    if st.button("🔬 Science Test"):
        _run_quick_test("Explain quantum computing in simple terms")
with col2:
    if st.button("📝 Poetry Test"):
        _run_quick_test("Write a short poem about AI")
with col3:
    if st.button("🌍 Environment Test"):
        _run_quick_test("What are the benefits of renewable energy?")
# Clear chat button — wipes the persisted history and reruns the script.
if st.button("🗑️ Clear Chat History"):
    st.session_state.messages = []
    st.rerun()

# Information about callbacks
st.markdown("---")
st.header("📋 LLM Callback Output")
st.markdown("""
**Check your console/terminal** to see the LLM interaction output:
```
🤖 LLM Request to gemini-3-flash-preview
⏰ Request time: 10:30:15
📋 Agent: llm_monitor_agent
📝 LLM Response from gemini-3-flash-preview
⏱️ Duration: 1.45s
🔢 Tokens: 156
💰 Estimated cost: $0.0004
```
""")

# Footer
st.markdown("---")
st.markdown("*Watch the console output to see LLM interaction callbacks in action!*")
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_2_llm_interaction_callbacks/app.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_3_tool_execution_callbacks/app.py | #!/usr/bin/env python3
"""
Streamlit App for Tool Execution Callbacks Demo
"""
import streamlit as st
import sys
import os
import asyncio
from agent import run_agent
# Page configuration
st.set_page_config(
    page_title="Tool Execution Callbacks",
    page_icon="🔧",
    layout="wide"
)

# Title and description
st.title("🔧 Tool Execution Callbacks Demo")
st.markdown("""
This demo shows how to monitor tool execution using callbacks.
Watch the console output to see detailed tool execution tracking!
""")

# Sidebar with information — static documentation of the tool callbacks.
with st.sidebar:
    st.header("📊 Tool Execution Monitoring")
    st.markdown("""
**Before Tool Callback**
- Triggered when a tool starts execution
- Logs tool name and input parameters
- Records agent name
- Stores start time for duration tracking
**After Tool Callback**
- Triggered when a tool finishes execution
- Logs tool result
- Calculates and displays execution duration
- Handles errors (e.g., division by zero)
""")
    st.markdown("---")
    st.markdown("### 🧮 Available Tools")
    st.markdown("""
**Calculator Tool**:
- Addition: `add`
- Subtraction: `subtract`
- Multiplication: `multiply`
- Division: `divide`
""")
# Main chat interface
st.header("💬 Chat with Tool Monitor")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages accumulated across reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input
if prompt := st.chat_input("Ask me to calculate something..."):
    # Add user message to chat
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Get agent response (asyncio.run blocks until the agent finishes)
    with st.chat_message("assistant"):
        with st.spinner("🔧 Tool is executing..."):
            response = asyncio.run(run_agent(prompt))
            st.markdown(response)

    # Add assistant response to chat
    st.session_state.messages.append({"role": "assistant", "content": response})
# Quick test buttons
st.markdown("---")
st.header("⚡ Quick Tests")


def _run_quick_test(test_message: str) -> None:
    """Render a canned prompt, run the agent, and persist both chat turns.

    Extracted helper: the three button handlers below were byte-identical
    except for the message text.
    """
    st.session_state.messages.append({"role": "user", "content": test_message})
    with st.chat_message("user"):
        st.markdown(test_message)
    with st.chat_message("assistant"):
        with st.spinner("🔧 Tool is executing..."):
            response = asyncio.run(run_agent(test_message))
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})


col1, col2, col3 = st.columns(3)
with col1:
    if st.button("➕ Addition Test"):
        _run_quick_test("Calculate 15 + 27")
with col2:
    if st.button("➗ Division Test"):
        _run_quick_test("What is 100 divided by 4?")
with col3:
    if st.button("❌ Error Test"):
        # Deliberately triggers the divide-by-zero path to demo error logging.
        _run_quick_test("Calculate 10 divided by 0")
# Clear chat button — wipes the persisted history and reruns the script.
if st.button("🗑️ Clear Chat History"):
    st.session_state.messages = []
    st.rerun()

# Information about callbacks
st.markdown("---")
st.header("📋 Tool Callback Output")
st.markdown("""
**Check your console/terminal** to see the tool execution output:
```
🔧 Tool calculator_tool started
📝 Parameters: {'operation': 'add', 'a': 15.0, 'b': 27.0}
📋 Agent: tool_execution_demo_agent
✅ Tool calculator_tool completed
⏱️ Duration: 0.0012s
📄 Result: 15 + 27 = 42
```
""")

# Footer
st.markdown("---")
st.markdown("*Watch the console output to see tool execution callbacks in action!*")
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_3_tool_execution_callbacks/app.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/budget.py | from agno.agent import Agent
from config.llm import model
budget_agent = Agent(
name="Budget Optimizer",
role="Calculate costs and optimize travel budgets when asked by team leader",
model=model,
description="You research costs, compare prices, and optimize travel budgets when assigned by the team leader. When plans exceed budget, you suggest strategic adjustments to bring costs in line while preserving the core travel experience.",
instructions=[
"# Budget Optimization Instructions",
"",
"1. Analyze total budget and cost requirements:",
" - Review total budget limit",
" - Calculate costs for transportation, accommodations, activities, food",
" - Identify any components exceeding budget",
"",
"2. If over budget, suggest cost-saving alternatives:",
" - Alternative accommodations or locations",
" - Different transportation options",
" - Mix of premium and budget experiences",
" - Free or lower-cost activity substitutes",
" - Budget-friendly dining recommendations",
"",
"3. Research and recommend money-saving strategies:",
" - Early booking discounts",
" - Package deals",
" - Off-peak pricing",
" - Local passes and discount cards",
"",
"4. Present clear budget breakdown showing:",
" - Original vs optimized costs",
" - Specific savings per category",
" - Alternative options",
" - Hidden cost warnings",
"",
"Format all amounts in user's preferred currency with clear comparisons between original and optimized budgets.",
],
markdown=True,
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/budget.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/destination.py | from agno.agent import Agent
from agno.tools.exa import ExaTools
from agno.tools.firecrawl import FirecrawlTools
from config.llm import model
destination_agent = Agent(
name="Destination Explorer",
model=model,
tools=[
ExaTools(
num_results=10,
),
],
description="You are a destination research agent that focuses on recommending mainstream tourist attractions and classic experiences that most travelers would enjoy. You prioritize well-known landmarks and popular activities while keeping recommendations general and widely appealing.",
instructions=[
"1. Focus on mainstream attractions with thoughtful guidance:",
" - Famous landmarks and monuments",
" - Popular tourist spots",
" - Well-known museums",
" - Classic shopping areas",
" - Common tourist activities",
"",
"2. Guide visitors with simple reasoning:",
" - Suggest crowd-pleasing activities",
" - Focus on family-friendly locations",
" - Recommend proven tourist routes",
" - Include popular photo spots",
"",
"3. Present clear attraction information:",
" - Simple description",
" - General location",
" - Regular opening hours",
" - Standard entrance fees",
" - Typical visit duration",
" - Basic visitor tips",
"",
"4. Organize information logically:",
" - Main attractions first",
" - Common day trips",
" - Standard tourist areas",
" - Popular activities",
"",
"Use tools to find and verify tourist information.",
"Keep suggestions general and widely appealing.",
],
expected_output="""
# Tourist Guide
## Main Attractions
List of most popular tourist spots
## Common Activities
Standard tourist activities and experiences
## Popular Areas
Well-known districts and neighborhoods
## Basic Information
- General visiting tips
- Common transportation options
- Standard tourist advice
""",
markdown=True,
show_tool_calls=True,
add_datetime_to_instructions=True,
retries=3,
delay_between_retries=2,
exponential_backoff=True,
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/destination.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/flight.py | from agno.agent import Agent
from agno.tools.firecrawl import FirecrawlTools
from tools.google_flight import get_google_flights
from config.llm import model
flight_search_agent = Agent(
name="Flight Search Assistant",
model=model,
tools=[
# FirecrawlTools(poll_interval=10),
# kayak_flight_url_generator,
get_google_flights,
],
instructions=[
"You are a sophisticated flight search and analysis assistant for comprehensive travel planning. For any user query:",
"1. Parse complete flight requirements including:",
" - Origin and destination cities",
" - Travel dates (outbound and return)",
" - Number of travelers (adults, children, infants)",
" - Preferred cabin class",
" - Any specific airlines or routing preferences",
" - Budget constraints if specified",
# "2. Search and analyze multiple flight options:",
"2. Search for flight options:",
# " - Use kayak_url_generator to create appropriate search URLs",
# " - Navigate to and extract data from flight search results",
" - Use get_google_flights to get flight results",
" - Consider both direct and connecting flights",
" - Compare different departure times and airlines",
"3. For each viable flight option, extract:",
" - Complete pricing breakdown (base fare, taxes, total)",
" - Flight numbers and operating airlines",
" - Detailed timing (departure, arrival, duration, layovers)",
" - Aircraft types and amenities when available",
" - Baggage allowance and policies",
"4. Organize and present options with focus on:",
" - Best value for money",
" - Convenient timing and minimal layovers",
" - Reliable airlines with good service records",
" - Flexibility and booking conditions",
"5. Provide practical recommendations considering:",
" - Price trends and booking timing",
" - Alternative dates or nearby airports if beneficial",
" - Loyalty program benefits if applicable",
" - Special requirements (extra legroom, dietary, etc.)",
"6. Include booking guidance:",
" - Direct booking links when available",
" - Fare rules and change policies",
" - Required documents and visa implications",
# "7. Always close browser sessions after completion",
],
expected_output="""
All flight details with the following fields:
- flight_number (str): The flight number of the flight
- price (str): The price of the flight
- airline (str): The airline of the flight
- departure_time (str): The departure time of the flight
- arrival_time (str): The arrival time of the flight
- duration (str): The duration of the flight
- stops (int): The number of stops of the flight
""",
markdown=True,
show_tool_calls=True,
debug_mode=True,
retries=3,
delay_between_retries=2,
exponential_backoff=True,
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/flight.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/food.py | from agno.tools.exa import ExaTools
from config.llm import model
from agno.agent import Agent
dining_agent = Agent(
name="Culinary Guide",
role="Research dining and food experiences when asked by team leader",
model=model,
tools=[ExaTools()],
description="You research restaurants, food markets, culinary experiences, and dining options when assigned by the team leader.",
instructions=[
"# Culinary Research and Recommendation Assistant",
"",
"## Task 1: Query Processing",
"- Parse dining preferences from user query",
"- Extract:",
" - Location/area",
" - Cuisine preferences",
" - Dietary restrictions",
" - Budget range",
" - Meal timing",
" - Group size",
" - Special requirements (e.g., family-friendly, romantic)",
"",
"## Task 2: Research & Data Collection",
"- Search for restaurants and food experiences using ExaTools",
"- Gather information about:",
" - Local cuisine specialties",
" - Popular food markets",
" - Culinary experiences",
" - Operating hours",
" - Price ranges",
" - Reservation policies",
"",
"## Task 3: Content Analysis",
"- Analyze restaurant reviews and ratings",
"- Evaluate:",
" - Food quality",
" - Service standards",
" - Ambiance",
" - Value for money",
" - Dietary accommodation",
" - Family-friendliness",
"",
"## Task 4: Data Processing",
"- Filter results based on:",
" - Dietary requirements",
" - Budget constraints",
" - Location preferences",
" - Special requirements",
"- Validate information completeness",
"",
"## Task 5: Results Presentation",
"Present recommendations in a clear, organized format:",
"",
"### Restaurant Recommendations",
"For each restaurant, include:",
"- Name and cuisine type",
"- Price range (e.g., $, $$, $$$)",
"- Rating and brief review summary",
"- Location and accessibility",
"- Operating hours",
"- Dietary options available",
"- Special features (e.g., outdoor seating, view)",
"- Reservation requirements",
"- Popular dishes to try",
"",
"### Food Markets & Culinary Experiences",
"- Market names and specialties",
"- Best times to visit",
"- Must-try local foods",
"- Cultural significance",
"",
"### Additional Information",
"- Local food customs and etiquette",
"- Peak dining hours to avoid",
"- Transportation options",
"- Food safety tips",
"",
"Format the output in clear sections with emojis and bullet points for better readability.",
],
expected_output="""
Present dining recommendations in a clear, organized format with the following sections:
# 🍽️ Restaurant Recommendations
For each recommended restaurant:
- Name and cuisine type
- Price range and value rating
- Location and accessibility
- Operating hours
- Dietary options
- Special features
- Popular dishes
- Reservation info
# 🛍️ Food Markets & Experiences
- Market names and specialties
- Best visiting times
- Local food highlights
- Cultural significance
# ℹ️ Additional Information
- Local customs
- Peak hours
- Transportation
- Safety tips
Use emojis and clear formatting for better readability.
""",
markdown=True,
show_tool_calls=True,
debug_mode=True,
retries=3,
delay_between_retries=2,
exponential_backoff=True,
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/food.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/hotel.py | from agno.agent import Agent
from tools.kayak_hotel import kayak_hotel_url_generator
from tools.scrape import scrape_website
from config.llm import model
from models.hotel import HotelResult, HotelResults
hotel_search_agent = Agent(
name="Hotel Search Assistant",
model=model,
tools=[
scrape_website,
kayak_hotel_url_generator,
],
instructions=[
"# Hotel Search and Data Extraction Assistant",
"",
"## Task 1: Query Processing",
"- Parse hotel search parameters from user query",
"- Extract:",
" - Destination",
" - Check-in/out dates",
" - Number of guests (adults, children)",
" - Room requirements",
" - Budget constraints",
" - Preferred amenities",
" - Location preferences",
"",
"## Task 2: URL Generation & Initial Scraping",
"- Generate Kayak URL using `kayak_hotel_url_generator`",
"- Perform initial content scrape with `scrape_website`",
"- Handle URL encoding for special characters in destination names",
"",
"## Task 3: Data Extraction",
"- Parse hotel listings from scraped content",
"- Extract key details:",
" - Prices (including taxes and fees)",
" - Amenities (especially family-friendly features)",
" - Ratings and reviews",
" - Location details",
" - Room types and availability",
" - Cancellation policies",
"- Handle dynamic loading of results",
"- Navigate multiple pages if needed",
"",
"## Task 4: Data Processing",
"- Structure extracted hotel data according to HotelResult model",
"- Validate data completeness",
"- Filter results based on:",
" - Budget constraints",
" - Required amenities",
" - Location preferences",
" - Family-friendly features",
"",
"## Task 5: Results Presentation",
"- Format results clearly with:",
" - Hotel name and rating",
" - Price breakdown",
" - Location and accessibility",
" - Key amenities",
" - Family-friendly features",
" - Booking policies",
"- Sort results by relevance to user preferences",
"- Include direct booking links",
"",
],
expected_output="""
List of hotels with the following fields for each hotel:
- hotel_name (str): The name of the hotel
- price (str): The price of the hotel
- rating (str): The rating of the hotel
- address (str): The address of the hotel
- amenities (List[str]): The amenities of the hotel
- description (str): The description of the hotel
- url (str): The url of the hotel
""",
markdown=True,
show_tool_calls=True,
debug_mode=True,
retries=3,
delay_between_retries=2,
exponential_backoff=True,
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/hotel.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/itinerary.py | from agno.agent import Agent
from agno.tools.exa import ExaTools
from agno.tools.firecrawl import FirecrawlTools
from agno.tools.reasoning import ReasoningTools
from config.llm import model
from typing import Optional
from datetime import datetime, timedelta
from textwrap import dedent
itinerary_agent = Agent(
name="Itinerary Specialist",
model=model,
tools=[
ExaTools(num_results=8),
FirecrawlTools(formats=["markdown"]),
ReasoningTools(add_instructions=True),
],
markdown=True,
description=dedent(
"""\
You are a master itinerary creator with expertise in crafting detailed, perfectly-timed daily travel plans.
You turn abstract travel details into structured, hour-by-hour plans that maximize enjoyment while maintaining
a realistic pace. You're skilled at adapting schedules to match traveler preferences, weather conditions,
opening hours, and local customs. Your itineraries are practical, thoroughly researched, and full of
insider timing tips that make travel smooth and stress-free."""
),
instructions=[
"1. Create perfectly balanced day-by-day itineraries with meticulous timing:",
" - Structure each day into morning, afternoon, and evening blocks",
" - Include exact timing for each activity (start/end times)",
" - Account for realistic travel times between locations",
" - Balance sightseeing with leisure and rest periods",
" - Adapt pace to match traveler preferences (relaxed, moderate, fast)",
"",
"2. Ensure practical logistics in all schedules:",
" - Verify operating hours for all attractions, restaurants, and services",
" - Account for common delays (security lines, crowds, traffic)",
" - Include buffer time between activities",
" - Check for day-specific closures (weekends, holidays, seasonal)",
" - Consider local transportation options and schedules",
"",
"3. Optimize activity timing with expert knowledge:",
" - Schedule visits during off-peak hours when possible",
" - Plan indoor activities during likely rainy/hot periods",
" - Arrange sunrise/sunset experiences at optimal times",
" - Schedule meals during traditional local dining hours",
" - Time activities to avoid rush hour transportation",
"",
"4. Create custom scheduling for specific traveler types:",
" - Families: Include kid-friendly breaks and early dinners",
" - Seniors: More relaxed pace with ample rest periods",
" - Young adults: Later start times and evening activities",
" - Luxury travelers: Timing for exclusive experiences",
" - Business travelers: Efficient scheduling around work commitments",
"",
"5. Enhance itineraries with practical timing details:",
" - Best arrival times to avoid lines at attractions",
" - Photography timing for optimal lighting",
" - Meal reservations timed around activities",
" - Shopping hours for local markets and stores",
" - Weather-dependent backup plans",
"",
"6. Research tools usage for accurate scheduling:",
" - Use Exa to research location-specific timing information",
" - Employ FirecrawlTools for current operating hours and conditions",
" - Use ReasoningTools to optimize activity sequence and timing",
"",
"7. Format day plans with maximum clarity:",
" - Use clear time blocks (8:00 AM - 9:30 AM)",
" - Include travel method and duration between locations",
" - Highlight reservation times and booking requirements",
" - Note required advance arrival times (security, check-in)",
" - Use emojis for better visual organization",
],
expected_output=dedent(
"""\
# Detailed Itinerary: {Destination} ({Start Date} - {End Date})
## Trip Overview
- **Dates**: {exact dates with day count}
- **Travelers**: {number and type}
- **Pace**: {relaxed/moderate/fast}
- **Style**: {luxury/mid-range/budget}
- **Priorities**: {key interests and goals}
## Day 1: {Day of Week}, {Date}
### Morning
- **7:00 AM - 8:00 AM**: Breakfast at {location}
- **8:30 AM - 10:30 AM**: {Activity} at {location}
* Notes: {special instructions, timing tips}
* Travel: {transport method, duration}
- **11:00 AM - 12:30 PM**: {Activity} at {location}
* Notes: {special instructions, timing tips}
* Travel: {transport method, duration}
### Afternoon
- **1:00 PM - 2:00 PM**: Lunch at {location}
- **2:30 PM - 4:30 PM**: {Activity} at {location}
* Notes: {special instructions, timing tips}
* Travel: {transport method, duration}
- **5:00 PM - 6:00 PM**: Rest/refresh at hotel
### Evening
- **7:00 PM - 8:30 PM**: Dinner at {location}
- **9:00 PM - 10:30 PM**: {Activity} at {location}
* Notes: {special instructions, timing tips}
* Travel: {transport method, duration}
## Day 2: {Day of Week}, {Date}
[Similar detailed breakdown]
[Continue for each day of the trip]
## Practical Notes
- **Weather Considerations**: {weather-related timing adjustments}
- **Transportation Tips**: {local transport timing advice}
- **Reservation Reminders**: {all pre-booked times}
- **Backup Plans**: {alternative schedules for weather/closures}
"""
),
add_datetime_to_instructions=True,
show_tool_calls=True,
retries=2,
delay_between_retries=2,
exponential_backoff=True,
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/itinerary.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/team.py | from agno.team.team import Team
from config.llm import model, model2
from agents.destination import destination_agent
from agents.hotel import hotel_search_agent
from agents.food import dining_agent
from agents.budget import budget_agent
from agents.flight import flight_search_agent
from agents.itinerary import itinerary_agent
from loguru import logger
from agno.tools.reasoning import ReasoningTools
# def update_team_current_state(team: Team, state: str) -> str:
# """
# This function is used to set the current state of the team.
# """
# logger.info(f"The current state of the team is {state}")
# team.session_state["current_state"] = state
# return state
trip_planning_team = Team(
name="TripCraft AI Team",
mode="coordinate",
model=model,
tools=[ReasoningTools(add_instructions=True)],
members=[
destination_agent,
hotel_search_agent,
dining_agent,
budget_agent,
flight_search_agent,
itinerary_agent,
],
show_tool_calls=True,
markdown=True,
description=(
"You are the lead orchestrator of the TripCraft AI planning team. "
"Your mission is to transform the user's travel preferences into a magical, stress-free itinerary. "
"Based on a single input form, you'll collaborate with expert agents handling flights, stays, dining, activities, and budgeting. "
"The result should be a beautifully crafted, practical, and emotionally resonant travel plan that feels personally designed. "
"Every detail matters - from the exact timing of activities to the ambiance of recommended restaurants. "
"Your goal is to create an itinerary so thorough and thoughtful that it feels like having a personal travel concierge."
),
instructions=[
"1. Meticulously analyze the complete travel preferences from the user input:",
" - Primary destination and any secondary locations",
" - Exact travel dates including arrival and departure times",
" - Preferred pace (relaxed, moderate, or fast-paced) with specific timing preferences",
" - Travel style (luxury, mid-range, budget) with detailed expectations",
" - Budget range with currency and flexibility notes",
" - Companion details (solo, couple, family, friends) with group dynamics",
" - Accommodation requirements (room types, amenities, location preferences)",
" - Desired vibes (romantic, adventurous, relaxing, etc.) with specific examples",
" - Top priorities (Instagram spots, local experiences, food, shopping) ranked by importance",
" - Special interests, dietary restrictions, accessibility needs",
" - Previous travel experiences and preferences",
"",
"2. Transportation Planning:",
" - Map out exact routes from start location to all destinations",
" - Research optimal flight/train combinations considering:",
" • Departure/arrival times aligned with check-in/out times",
" • Layover durations and airport transfer times",
" • Airline alliance benefits and baggage policies",
" • Alternative airports and routes for cost optimization",
" - Plan local transportation between all points of interest",
"",
"3. Coordinate with Specialized Agents:",
" - Flight Agent: Detailed air travel options with timing and pricing",
" - Hotel Agent: Accommodation matches for each night with amenity details",
" - Dining Agent: Restaurant recommendations with cuisine, price, and ambiance",
" - Activity Agent: Curated experiences matching interests and pace",
" - Budget Agent: Cost optimization while maintaining experience quality",
"",
"4. Create Detailed Daily Schedules:",
" Morning (6am-12pm):",
" - Breakfast venues with opening hours and signature dishes",
" - Morning activities with exact durations and travel times",
" - Alternative options for weather contingencies",
"",
" Afternoon (12pm-6pm):",
" - Lunch recommendations with peak times and reservation needs",
" - Main sightseeing with entrance fees and skip-the-line options",
" - Rest periods aligned with pace preference",
"",
" Evening (6pm-midnight):",
" - Dinner venues with ambiance descriptions and dress codes",
" - Evening entertainment options",
" - Nightlife suggestions if requested",
"",
"5. Experience Enhancement:",
" - Research and highlight hidden gems matching user interests",
" - Identify unique local experiences with cultural significance",
" - Find Instagram-worthy locations with best photo times",
" - Source exclusive or unusual accommodation options",
" - Map romantic spots for couples or family-friendly venues",
"",
"6. Budget Management:",
" - Break down costs to the smallest detail:",
" • Transportation (flights, trains, taxis, public transit)",
" • Accommodations (nightly rates, taxes, fees)",
" • Activities (tickets, guides, equipment rentals)",
" • Meals (by venue type and meal time)",
" • Shopping allowance",
" • Emergency buffer",
" - Provide cost-saving alternatives while maintaining experience quality",
" - Consider seasonal pricing variations",
"",
"7. Research Tools Usage:",
" - Use Exa for deep destination research including:",
" • Seasonal events and festivals",
" • Local customs and etiquette",
" • Weather patterns and best visit times",
" - Employ Firecrawl for real-time data on:",
" • Venue reviews and ratings",
" • Current pricing and availability",
" • Booking platforms and deals",
"",
"8. Personalization Elements:",
" - Reference and incorporate past travel experiences",
" - Avoid previously visited locations unless requested",
" - Match recommendations to stated preferences",
" - Add personal touches based on special occasions or interests",
"",
"9. Final Itinerary Crafting:",
" - Ensure perfect flow between all elements",
" - Include buffer time for transitions",
" - Add local tips and insider knowledge",
" - Provide backup options for key elements",
" - Format for both inspiration and practical use",
],
expected_output="""
A meticulously detailed, day-by-day travel itinerary in Markdown format including:
**I. Executive Summary**
- 🎯 Trip Purpose & Vision
• Primary goals and desired experiences
• Special occasions or celebrations
• Key preferences and must-haves
- ✈️ Travel Overview
• Exact dates with day count
• All destinations in sequence
• Group composition and dynamics
• Overall style and pace
• Total budget range and currency
- 💫 Experience Highlights
• Signature moments and unique experiences
• Special arrangements and exclusives
• Instagram-worthy locations
• Cultural immersion opportunities
**II. Travel Logistics**
- 🛫 Outbound Journey
• Flight/train details with exact timings
• Carrier information and booking references
• Seat recommendations
• Baggage allowances and restrictions
• Airport/station transfer details
• Check-in instructions
- 🛬 Return Journey
• Return transportation specifics
• Timing coordination with checkout
• Alternative options if available
**III. Detailed Daily Itinerary**
For each day (e.g., "Day 1 - Monday, July 1, 2025"):
- 🌅 Morning (6am-12pm)
• Wake-up time and morning routine
• Breakfast venue with menu highlights
• Morning activities with durations
• Transport between locations
• Tips for timing and crowds
- ☀️ Afternoon (12pm-6pm)
• Lunch recommendations with price range
• Main activities and experiences
• Rest periods and flexibility
• Photo opportunities
• Indoor/outdoor alternatives
- 🌙 Evening (6pm-onwards)
• Dinner reservations and details
• Evening entertainment
• Nightlife options if desired
• Transport back to accommodation
- 🏨 Accommodation
• Property name and room type
• Check-in/out times
• Key amenities and features
• Location benefits
• Booking confirmation details
- 📝 Daily Notes
• Weather considerations
• Dress code requirements
• Advance bookings needed
• Local customs and tips
• Emergency contacts
**IV. Accommodation Details**
For each property:
- 📍 Location & Access
• Exact address and coordinates
• Transport options and costs
• Surrounding area highlights
• Distance to key attractions
- 🛎️ Property Features
• Room types and views
• Included amenities
• Dining options
• Special services
• Unique selling points
- 💰 Costs & Booking
• Nightly rates and taxes
• Additional fees
• Cancellation policy
• Payment methods
• Booking platform links
**V. Curated Experiences**
- 🎭 Activities & Attractions
• Name and description
• Operating hours and duration
• Admission fees
• Booking requirements
• Insider tips
• Alternative options
• Accessibility notes
- 🍽️ Dining Experiences
• Restaurant details and cuisine
• Price ranges and menu highlights
• Ambiance and dress code
• Reservation policies
• Signature dishes
• Dietary accommodation
• View/seating recommendations
**VI. Comprehensive Budget**
- 💵 Total Trip Cost
• Grand total in user's currency
• Exchange rates used
• Payment timeline
- 📊 Detailed Breakdown
• Transportation
- Flights/trains
- Local transport
- Airport transfers
• Accommodations
- Nightly rates
- Taxes and fees
- Extra services
• Activities
- Admission fees
- Guide costs
- Equipment rental
• Dining
- Breakfast allowance
- Lunch budget
- Dinner budget
- Drinks/snacks
• Shopping & Souvenirs
• Emergency Fund
• Optional Upgrades
**VII. Essential Information**
- 📋 Pre-Trip Preparation
• Visa requirements
• Health and insurance
• Packing recommendations
• Weather forecasts
• Currency exchange tips
- 🗺️ Destination Guide
• Local customs and etiquette
• Language basics
• Emergency contacts
• Medical facilities
• Shopping areas
• Local transport options
- 📱 Digital Resources
• Useful apps
• Booking confirmations
• Maps and directions
• Restaurant reservations
• Activity tickets
- ⚠️ Contingency Plans
• Weather alternatives
• Backup restaurants
• Emergency contacts
• Travel insurance details
• Cancellation policies
Format the entire itinerary with:
• Clear section headers
• Consistent emoji usage
• Bullet points and sub-bullets
• Tables where appropriate
• Highlighted important information
• Links to all bookings and reservations
• Day-specific weather forecasts
• Local emergency numbers
• Relevant photos and maps
""",
success_criteria=[
"✅ Complete itinerary with all travel days and activities",
"✅ Stays within budget constraints",
"✅ Matches user priorities and travel style",
"✅ Well-structured daily schedule matching user's pace",
"✅ Real flights and accommodations with costs and links",
"✅ Daily activities aligned with selected vibes",
"✅ Clear Markdown format with good visuals",
"✅ Realistic budget breakdown",
"✅ Personalized tips based on user profile",
"✅ Verified, real-world locations only",
],
enable_agentic_context=True,
share_member_interactions=True,
show_members_responses=True,
add_datetime_to_instructions=True,
add_member_tools_to_system_message=True,
# debug_mode=True,
telemetry=False,
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/agents/team.py",
"license": "Apache License 2.0",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/api/app.py | from fastapi import FastAPI, APIRouter
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from datetime import datetime, timezone
from contextlib import asynccontextmanager
from services.db_service import initialize_db_pool, close_db_pool
from router.plan import router as plan_router
# All service-level endpoints hang off this "/api"-prefixed router.
router = APIRouter(prefix="/api")


@router.get("/health", summary="API Health Check")
async def health_check():
    """Liveness endpoint: report a healthy status plus the current UTC time."""
    logger.debug("Health check requested")
    now_utc = datetime.now(timezone.utc)
    return {"status": "healthy", "timestamp": now_utc.isoformat()}
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: open the DB pool before serving, close it after.

    Everything before ``yield`` runs at startup; everything after runs at
    shutdown.
    """
    # Startup logic
    logger.info("API server started")
    # Initialize database connection pool
    logger.info("Initializing database connection pool")
    await initialize_db_pool()
    logger.info("Database connection pool initialized")
    yield
    # Shutdown logic
    # Close database connection pool
    # NOTE(review): close_db_pool() is not in a try/finally, so it is skipped
    # if the app crashes mid-serve before reaching shutdown — confirm intent.
    logger.info("Closing database connection pool")
    await close_db_pool()
    logger.info("API server shutting down")
# Application wiring: app metadata, CORS middleware, and router registration.
app = FastAPI(
    title="TripCraft AI API",
    description="API for running intelligent trip planning in the background",
    version="0.1.0",
    lifespan=lifespan,  # DB pool startup/shutdown defined above
)
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# very permissive (and browsers reject credentialed wildcard CORS); tighten
# to an explicit origin list for production — confirm intent.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
app.include_router(router)  # /api/health
app.include_router(plan_router)  # trip-planning endpoints (router.plan)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/api/app.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/broswer.py |
# Manual smoke-test script: configure logging, load env vars, then run one
# agent query and print the result.  NOTE(review): the filename "broswer.py"
# looks like a typo for "browser.py" — confirm before renaming.
from config.logger import setup_logging
setup_logging(console_level="INFO")
from loguru import logger
logger.info("Starting the application")
logger.info("Loading environment variables")
from dotenv import load_dotenv
load_dotenv()
logger.info("Loaded environment variables")
logger.info("Loading agents")
# Agents are imported after logging/env setup so their construction is logged
# and they see the loaded environment.
from agents.flight import flight_search_agent
from agents.hotel import hotel_search_agent
logger.info("Loaded agents")
# Earlier experiment kept for reference: an agent that reformats flight
# results into a structured FlightResults response model.
# structured_output_agent = Agent(
#     name="Structured Output Generator",
#     model=model2,
#     instructions="Generate structured output in the specified schema format. Parse input data and format according to schema requirements. DO NOT include any other text in your response.",
#     expected_output=dedent("""\
#         A JSON object with the following fields:
#         - status (str): Success or error status of the request (success or error)
#         - message (str): Status message or error description
#         - data: Object containing the flight results
#             - flights: A list of flight results
#                 Each flight has the following fields:
#                 - flight_number (str): The flight number of the flight
#                 - price (str): The price of the flight
#                 - airline (str): The airline of the flight
#                 - departure_time (str): The departure time of the flight
#                 - arrival_time (str): The arrival time of the flight
#                 - duration (str): The duration of the flight
#                 - stops (int): The number of stops of the flight
#         **DO NOT include any other text in your response.**
#     }"""),
#     markdown=True,
#     show_tool_calls=True,
#     debug_mode=True,
#     response_model=FlightResults,
# )
# Sample flight query, disabled in favor of the hotel query below.
# response = flight_search_agent.run("""
# Give me flights from Mumbai to Singapore for premium economy on 1 july 2025 for 2 adults and 1 child and sort by cheapest
# """)
# print(response.content)
# Live smoke test: one hotel search, result printed to stdout.
response = hotel_search_agent.run("""
Give me hotels in Singapore for 2 adults and 1 child on 1 july 2025 to 10 july 2025 and sort by cheapest
""")
print(response.content)
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/broswer.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/config/llm.py | from agno.models.google import Gemini
from agno.models.openai import OpenAIChat
from agno.models.openrouter import OpenRouter
# model = Gemini(id="gemini-2.0-flash-001", temperature=0.1)
# model2 = OpenAIChat(id="gpt-4o", temperature=0.1)
model = OpenRouter(id="google/gemini-2.0-flash-001", temperature=0.3, max_tokens=8096)
model2 = OpenRouter(id="openai/gpt-4o", temperature=0.1)
model_zero = OpenRouter(
id="google/gemini-2.0-flash-001", temperature=0.1, max_tokens=8096
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/config/llm.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/config/logger.py | import sys
import logging
import inspect
from typing import Dict, Any, Callable
from loguru import logger
from pathlib import Path
# Create logs directory if it doesn't exist
# LOGS_DIR = Path("logs")
# LOGS_DIR.mkdir(exist_ok=True)
def configure_logger(console_level: str = "INFO", log_format: str = None) -> None:
"""Configure loguru logger with console and file outputs
Args:
console_level: Minimum level for console logs
file_level: Minimum level for file logs
rotation: When to rotate log files (size or time)
retention: How long to keep log files
log_format: Optional custom format string
"""
# Remove default configuration
logger.remove()
# Use default format if none provided
if log_format is None:
log_format = "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>"
# Add console handler
logger.add(
sys.stderr,
format=log_format,
level=console_level,
colorize=True,
backtrace=True,
diagnose=True,
)
# # Add file handler
# logger.add(
# LOGS_DIR / "app.log",
# format=log_format,
# level=console_level,
# )
# Intercept standard library logging to loguru
class InterceptHandler(logging.Handler):
"""Intercepts standard library logging and redirects to loguru"""
def emit(self, record: logging.LogRecord) -> None:
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
# Find caller from where originated the logged message
frame, depth = inspect.currentframe(), 0
while frame and frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level, record.getMessage()
)
def patch_std_logging():
"""Patch all standard library loggers to use loguru"""
# Replace all existing handlers with the InterceptHandler
logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True)
# Update all existing loggers
for name in logging.root.manager.loggerDict.keys():
logging_logger = logging.getLogger(name)
logging_logger.handlers = [InterceptHandler()]
logging_logger.propagate = False
# Update specific common libraries
for logger_name in ("uvicorn", "uvicorn.error", "uvicorn.access", "fastapi"):
logging_logger = logging.getLogger(logger_name)
logging_logger.handlers = [InterceptHandler()]
def setup_logging(console_level: str = "INFO", intercept_stdlib: bool = True) -> None:
"""Setup logging for the entire application
Args:
console_level: Minimum level for console output
file_level: Minimum level for file output
intercept_stdlib: Whether to patch standard library logging
"""
# Configure loguru
configure_logger(console_level=console_level)
# Optionally patch standard library logging
if intercept_stdlib:
patch_std_logging()
# Add extra context to logger
logger.configure(extra={"app_name": "decipher-research-agent"})
logger.info("Logging configured successfully")
def logger_hook(function_name: str, function_call: Callable, arguments: Dict[str, Any]):
"""Hook function that wraps the tool execution"""
logger.info(f"About to call {function_name} with arguments: {arguments}")
result = function_call(**arguments)
logger.info(f"Function call completed with result: {result}")
return result
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/config/logger.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/main.py | from dotenv import load_dotenv
from loguru import logger
# Load environment variables
logger.info("Loading environment variables")
load_dotenv()
logger.info("Environment variables loaded")
# Import and setup logging configuration
from config.logger import setup_logging
# Configure logging with loguru
setup_logging(console_level="INFO")
from api.app import app
if __name__ == "__main__":
logger.info("Starting TripCraft AI API server")
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/main.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/flight.py | from pydantic import BaseModel, Field
from typing import List, Optional
class FlightResult(BaseModel):
flight_number: str = Field(description="The flight number of the flight")
price: str = Field(description="The price of the flight")
airline: str = Field(description="The airline of the flight")
departure_time: str = Field(description="The departure time of the flight")
arrival_time: str = Field(description="The arrival time of the flight")
duration: str = Field(description="The duration of the flight")
stops: int = Field(description="The number of stops of the flight")
class FlightResults(BaseModel):
flights: List[FlightResult] = Field(description="The list of flights")
class FlightSearchRequest(BaseModel):
departure: str = Field(description="The departure airport")
destination: str = Field(description="The destination airport")
date: str = Field(description="The date of the flight")
return_date: Optional[str] = Field(description="The return date of the flight")
adults: int = Field(description="The number of adults")
children: int = Field(description="The number of children")
cabin_class: str = Field(description="The cabin class")
sort: str = Field(description="The sort order") | {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/flight.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/hotel.py | from pydantic import BaseModel, Field
from typing import List
class HotelResult(BaseModel):
hotel_name: str = Field(description="The name of the hotel")
price: str = Field(description="The price of the hotel")
rating: str = Field(description="The rating of the hotel")
address: str = Field(description="The address of the hotel")
amenities: List[str] = Field(description="The amenities of the hotel")
description: str = Field(description="The description of the hotel")
url: str = Field(description="The url of the hotel")
class HotelResults(BaseModel):
hotels: List[HotelResult] = Field(description="The list of hotels")
class HotelSearchRequest(BaseModel):
destination: str = Field(description="The destination city or area")
check_in: str = Field(description="The date of check-in in the format 'YYYY-MM-DD'")
check_out: str = Field(description="The date of check-out in the format 'YYYY-MM-DD'")
adults: int = Field(description="The number of adults")
children: int = Field(description="The number of children")
rooms: int = Field(description="The number of rooms")
sort: str = Field(description="The sort order") | {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/hotel.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/plan_task.py | from datetime import datetime, timezone
from enum import Enum
from typing import Optional
from sqlalchemy import String, DateTime, Enum as SQLEnum, JSON
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
class TaskStatus(str, Enum):
queued = "queued"
in_progress = "in_progress"
success = "success"
error = "error"
@classmethod
def _missing_(cls, value):
"""Handle case-insensitive enum values."""
for member in cls:
if member.value.lower() == value.lower():
return member
return None
class Base(DeclarativeBase):
pass
class PlanTask(Base):
"""Model for tracking plan tasks and their states."""
__tablename__ = "plan_tasks"
id: Mapped[int] = mapped_column(primary_key=True)
trip_plan_id: Mapped[str] = mapped_column(String(50), index=True)
task_type: Mapped[str] = mapped_column(String(50))
status: Mapped[TaskStatus] = mapped_column(
SQLEnum(TaskStatus, name="plan_task_status")
)
input_data: Mapped[dict] = mapped_column(JSON)
output_data: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
error_message: Mapped[Optional[str]] = mapped_column(String(500), nullable=True)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
default=lambda: datetime.now(timezone.utc),
onupdate=lambda: datetime.now(timezone.utc),
)
| {
"repo_id": "Shubhamsaboo/awesome-llm-apps",
"file_path": "advanced_ai_agents/multi_agent_apps/agent_teams/ai_travel_planner_agent_team/backend/models/plan_task.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.