repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/agents_as_tools.py | ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/agents_as_tools.py | from agents import Agent, Runner, function_tool
import asyncio
# --- Specialist worker agents ---------------------------------------------
# One agent per phase of the content pipeline (research -> write -> edit).
# They are wrapped as function tools below so an orchestrator can call them.

# Gathers factual information and insights on a topic.
research_agent = Agent(
    name="Research Specialist",
    instructions="""
You are a research specialist. Provide detailed, well-researched information
on any topic with proper analysis and insights. Focus on factual accuracy
and comprehensive coverage.
"""
)

# Turns raw research output into structured, readable prose.
writing_agent = Agent(
    name="Writing Specialist",
    instructions="""
You are a professional writer. Take research information and create
well-structured, engaging content with proper formatting and flow.
Make content accessible and compelling for readers.
"""
)

# Polishes drafts for grammar, clarity, structure, and tone.
editing_agent = Agent(
    name="Editing Specialist",
    instructions="""
You are a professional editor. Review written content for:
- Grammar and spelling errors
- Clarity and readability
- Structure and flow
- Consistency and tone
Provide the improved version of the content.
"""
)
# Wrap the specialist agents as callable tools for the orchestrator.
@function_tool
async def research_tool(topic: str) -> str:
    """Research a topic using the specialized research agent with custom configuration."""
    prompt = f"Research this topic thoroughly and provide key insights: {topic}"
    # max_turns=3 gives the research agent room for deeper multi-turn work.
    run_result = await Runner.run(research_agent, input=prompt, max_turns=3)
    return str(run_result.final_output)
@function_tool
async def writing_tool(content: str, style: str = "professional") -> str:
    """Transform content using the specialized writing agent with custom style."""
    # Cap drafting at two turns so content generation stays bounded.
    draft = await Runner.run(
        writing_agent,
        input=f"Write engaging {style} content based on this research: {content}",
        max_turns=2,
    )
    return str(draft.final_output)
@function_tool
async def editing_tool(content: str) -> str:
    """Edit and improve content using the specialized editing agent."""
    prompt = f"Edit and improve this content for clarity, grammar, and engagement: {content}"
    edited = await Runner.run(editing_agent, input=prompt)
    return str(edited.final_output)
# Orchestrator agent: a single agent that can invoke the three specialist
# tools above, deciding the research -> write -> edit sequence on its own.
content_orchestrator = Agent(
    name="Content Creation Orchestrator",
    instructions="""
You are a content creation orchestrator that coordinates research, writing, and editing.
You have access to:
- research_tool: For in-depth topic research and insights
- writing_tool: For professional content creation (specify style: professional, casual, academic, etc.)
- editing_tool: For content review and improvement
When users request content:
1. First use research_tool to gather comprehensive information
2. Then use writing_tool to create well-structured content
3. Finally use editing_tool to polish and improve the final piece
Coordinate all three tools to create high-quality, well-researched content.
""",
    tools=[research_tool, writing_tool, editing_tool]
)
# Example 1: Basic content creation workflow
async def basic_content_workflow():
    """Demonstrates basic orchestration using agents as tools."""
    print("=== Basic Content Creation Workflow ===")
    request = """Create a comprehensive article about the benefits of renewable energy.
I need it to be professional and well-researched, suitable for a business audience."""
    # The orchestrator decides internally which tools to call and in what order.
    run = await Runner.run(content_orchestrator, request)
    print(f"Final article: {run.final_output}")
    return run
# Example 2: Custom workflow with specific requirements
async def custom_workflow_example():
    """Shows orchestrator handling specific workflow requirements."""
    print("\n=== Custom Workflow with Specific Requirements ===")
    request = """I need content about artificial intelligence in healthcare for a technical blog.
Make sure to:
1. Research current AI applications in medical diagnosis
2. Write in an accessible but technical style
3. Include both benefits and challenges
4. Keep it under 500 words
Please go through the full research -> write -> edit process."""
    run = await Runner.run(content_orchestrator, request)
    print(f"Technical blog post: {run.final_output}")
    return run
# Example 3: Comparison with direct agent orchestration
async def direct_orchestration_comparison():
    """Compares agents-as-tools vs direct orchestration."""
    print("\n=== Direct Orchestration (Manual) ===")
    topic = "The future of remote work"
    # Manual orchestration: invoke each specialist agent by hand, in order,
    # threading each phase's output into the next phase's prompt.
    print("Step 1: Research...")
    researched = await Runner.run(
        research_agent, f"Research trends and predictions about: {topic}"
    )
    print("Step 2: Writing...")
    drafted = await Runner.run(
        writing_agent,
        f"Write a professional article based on this research: {researched.final_output}",
    )
    print("Step 3: Editing...")
    edited = await Runner.run(
        editing_agent, f"Edit and improve this article: {drafted.final_output}"
    )
    print(f"Manual orchestration result: {edited.final_output}")
    print("\n=== Agents-as-Tools Orchestration (Automatic) ===")
    # Automatic orchestration: the orchestrator agent sequences the same phases.
    auto_run = await Runner.run(
        content_orchestrator,
        f"Create a professional article about: {topic}. Go through research, writing, and editing.",
    )
    print(f"Automatic orchestration result: {auto_run.final_output}")
    return edited, auto_run
# Example 4: Advanced orchestrator with conditional logic
async def advanced_orchestrator_example():
    """Shows more sophisticated orchestration logic."""
    print("\n=== Advanced Orchestrator with Conditional Logic ===")
    # The conditional workflow rules live in the agent's prompt, not in
    # Python code: the model decides which tools to use per request.
    advanced_orchestrator = Agent(
        name="Advanced Content Orchestrator",
        instructions="""
You are an intelligent content orchestrator that adapts workflows based on requirements.
Available tools:
- research_tool: For topic research
- writing_tool: For content creation (styles: professional, casual, academic, creative)
- editing_tool: For content improvement
Workflow decisions:
- For complex/technical topics: Do extra research first
- For creative content: Use creative writing style
- For short content: Skip detailed research
- For business content: Always edit for professionalism
- Always explain your workflow decisions
Adapt your approach based on the specific request.
""",
        tools=[research_tool, writing_tool, editing_tool]
    )
    # Three request types chosen to exercise different branches of the
    # conditional logic above (short, technical, creative).
    requests = [
        "Write a quick social media post about coffee benefits",
        "Create a detailed technical whitepaper on blockchain security",
        "Write a creative story about a robot learning to paint"
    ]
    for i, request in enumerate(requests, 1):
        print(f"\nRequest {i}: {request}")
        result = await Runner.run(advanced_orchestrator, request)
        print(f"Result: {result.final_output}")
        print("-" * 50)
    return requests
# Main execution: run the four demos in sequence.
async def main():
    print("🔧 OpenAI Agents SDK - Agents as Tools Orchestration")
    print("=" * 60)
    demos = (
        basic_content_workflow,
        custom_workflow_example,
        direct_orchestration_comparison,
        advanced_orchestrator_example,
    )
    for demo in demos:
        await demo()
    print("\n✅ Agents as tools tutorial complete!")
    print("Agents as tools enable sophisticated workflow orchestration with intelligent coordination")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/parallel_execution.py | ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/parallel_execution.py | import asyncio
from agents import Agent, ItemHelpers, Runner, trace
# Translation agent that will be sampled several times in parallel to
# generate multiple candidate translations.
spanish_agent = Agent(
    name="Spanish Translator",
    instructions="You translate the user's message to Spanish. Provide natural, fluent translations."
)

# Judge agent that picks the best candidate translation.
translation_picker = Agent(
    name="Translation Quality Picker",
    instructions="""
You are an expert in Spanish translations.
Given multiple Spanish translation options, pick the most natural, accurate, and fluent one.
Explain briefly why you chose that translation.
"""
)
# Example 1: Basic parallel execution with quality selection
async def parallel_translation_example():
    """Demonstrates running the same agent multiple times in parallel for quality."""
    print("=== Parallel Translation with Quality Selection ===")
    msg = "Hello, how are you today? I hope you're having a wonderful time!"
    print(f"Original message: {msg}")
    # Ensure the entire workflow is recorded as a single trace.
    with trace("Parallel Translation Workflow") as workflow_trace:
        print("Running 3 parallel translation attempts...")
        # Sample the translator three times concurrently.
        attempts = await asyncio.gather(
            *(Runner.run(spanish_agent, msg) for _ in range(3))
        )
        # Pull the plain-text output of each attempt.
        outputs = [ItemHelpers.text_message_outputs(r.new_items) for r in attempts]
        numbered = [f"Translation {idx + 1}: {text}" for idx, text in enumerate(outputs)]
        translations = "\n\n".join(numbered)
        print(f"\nAll translations:\n{translations}")
        # Have the picker agent choose the strongest candidate.
        best_translation = await Runner.run(
            translation_picker,
            f"Original English: {msg}\n\nTranslations to choose from:\n{translations}"
        )
        print(f"\nBest translation selected: {best_translation.final_output}")
        print(f"Workflow trace ID: {workflow_trace.trace_id}")
    return best_translation
# Example 2: Parallel execution with different specialized agents
async def parallel_specialized_agents():
    """Shows parallel execution with different agents for diverse perspectives."""
    print("\n=== Parallel Execution with Specialized Agents ===")
    # Three translators with different registers / regional flavors.
    formal_translator = Agent(
        name="Formal Spanish Translator",
        instructions="Translate to formal, polite Spanish using 'usted' forms."
    )
    casual_translator = Agent(
        name="Casual Spanish Translator",
        instructions="Translate to casual, friendly Spanish using 'tú' forms."
    )
    regional_translator = Agent(
        name="Mexican Spanish Translator",
        instructions="Translate to Mexican Spanish with regional expressions and vocabulary."
    )
    msg = "Hey friend, want to grab some coffee later?"
    print(f"Original message: {msg}")
    # Group the whole comparison under one trace.
    with trace("Multi-Style Translation") as style_trace:
        print("Running parallel translations with different styles...")
        # Run the three stylistic variants concurrently.
        formal_result, casual_result, regional_result = await asyncio.gather(
            Runner.run(formal_translator, msg),
            Runner.run(casual_translator, msg),
            Runner.run(regional_translator, msg)
        )
        # Extract plain-text outputs from each run's items.
        formal_text = ItemHelpers.text_message_outputs(formal_result.new_items)
        casual_text = ItemHelpers.text_message_outputs(casual_result.new_items)
        regional_text = ItemHelpers.text_message_outputs(regional_result.new_items)
        print(f"\nFormal style: {formal_text}")
        print(f"Casual style: {casual_text}")
        print(f"Regional style: {regional_text}")
        # Summarize all variants so the picker agent can compare them.
        style_comparison = f"""
Original: {msg}
Formal Spanish: {formal_text}
Casual Spanish: {casual_text}
Mexican Spanish: {regional_text}
"""
        style_recommendation = await Runner.run(
            translation_picker,
            f"Compare these translation styles and recommend which is most appropriate for the context: {style_comparison}"
        )
        print(f"\nStyle recommendation: {style_recommendation.final_output}")
        print(f"Multi-style trace ID: {style_trace.trace_id}")
    return style_recommendation
# Example 3: Parallel execution for content generation diversity
async def parallel_content_generation():
    """Demonstrates parallel content generation for creative diversity."""
    print("\n=== Parallel Content Generation for Diversity ===")
    # Writers with three distinct rhetorical approaches.
    creative_agent = Agent(
        name="Creative Writer",
        instructions="Write creative, engaging content with vivid imagery and storytelling."
    )
    informative_agent = Agent(
        name="Informative Writer",
        instructions="Write clear, factual, informative content focused on key information."
    )
    persuasive_agent = Agent(
        name="Persuasive Writer",
        instructions="Write compelling, persuasive content that motivates action."
    )
    topic = "The benefits of learning a new language"
    print(f"Content topic: {topic}")
    with trace("Diverse Content Generation") as content_trace:
        print("Generating content with different writing styles in parallel...")
        # Generate the three approaches simultaneously.
        creative_result, informative_result, persuasive_result = await asyncio.gather(
            Runner.run(creative_agent, f"Write a short paragraph about: {topic}"),
            Runner.run(informative_agent, f"Write a short paragraph about: {topic}"),
            Runner.run(persuasive_agent, f"Write a short paragraph about: {topic}")
        )
        # Extract plain-text content from each run.
        creative_content = ItemHelpers.text_message_outputs(creative_result.new_items)
        informative_content = ItemHelpers.text_message_outputs(informative_result.new_items)
        persuasive_content = ItemHelpers.text_message_outputs(persuasive_result.new_items)
        print(f"\nCreative approach:\n{creative_content}")
        print(f"\nInformative approach:\n{informative_content}")
        print(f"\nPersuasive approach:\n{persuasive_content}")
        # A fourth agent merges the best elements of all three drafts.
        synthesis_agent = Agent(
            name="Content Synthesizer",
            instructions="Combine the best elements from multiple content pieces into one cohesive, high-quality paragraph."
        )
        combined_content = f"""
Topic: {topic}
Creative version: {creative_content}
Informative version: {informative_content}
Persuasive version: {persuasive_content}
"""
        synthesized_result = await Runner.run(
            synthesis_agent,
            f"Create the best possible paragraph by combining elements from these approaches: {combined_content}"
        )
        print(f"\nSynthesized content: {synthesized_result.final_output}")
        print(f"Content generation trace ID: {content_trace.trace_id}")
    return synthesized_result
# Main execution: run the three parallel-execution demos in sequence.
async def main():
    print("🎼 OpenAI Agents SDK - Parallel Multi-Agent Execution")
    print("=" * 60)
    demos = (
        parallel_translation_example,
        parallel_specialized_agents,
        parallel_content_generation,
    )
    for demo in demos:
        await demo()
    print("\n✅ Parallel execution tutorial complete!")
    print("Parallel execution enables quality improvement through diversity and selection")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_2_agents_as_tools/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_2_agents_as_tools/__init__.py | # Agents as Tools module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_2_agents_as_tools/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_2_agents_as_tools/agent.py | from agents import Agent, Runner, function_tool
import asyncio
# --- Specialist agents: one per content phase (research -> write -> edit) --
research_agent = Agent(
    name="Research Specialist",
    instructions="""
You are a research specialist. Provide detailed, well-researched information
on any topic with proper analysis and insights. Focus on factual accuracy
and comprehensive coverage.
"""
)

# Turns research output into structured prose.
writing_agent = Agent(
    name="Writing Specialist",
    instructions="""
You are a professional writer. Take research information and create
well-structured, engaging content with proper formatting and flow.
Make content accessible and compelling for readers.
"""
)

# Polishes drafts for grammar, clarity, structure, and tone.
editing_agent = Agent(
    name="Editing Specialist",
    instructions="""
You are a professional editor. Review written content for:
- Grammar and spelling errors
- Clarity and readability
- Structure and flow
- Consistency and tone
Provide the improved version of the content.
"""
)

# Wrap each specialist agent as a callable tool so an orchestrator agent
# can invoke them like ordinary functions.
@function_tool
async def research_tool(topic: str) -> str:
    """Research a topic using the specialized research agent with custom configuration"""
    result = await Runner.run(
        research_agent,
        input=f"Research this topic thoroughly and provide key insights: {topic}",
        max_turns=3  # Allow deeper research
    )
    return str(result.final_output)

@function_tool
async def writing_tool(content: str, style: str = "professional") -> str:
    """Transform content using the specialized writing agent with custom style"""
    prompt = f"Write engaging {style} content based on this research: {content}"
    result = await Runner.run(
        writing_agent,
        input=prompt,
        max_turns=2  # Keep drafting bounded
    )
    return str(result.final_output)

@function_tool
async def editing_tool(content: str) -> str:
    """Edit and improve content using the specialized editing agent"""
    result = await Runner.run(
        editing_agent,
        input=f"Edit and improve this content for clarity, grammar, and engagement: {content}"
    )
    return str(result.final_output)

# Orchestrator agent: sequences the three tools above on its own.
content_orchestrator = Agent(
    name="Content Creation Orchestrator",
    instructions="""
You are a content creation orchestrator that coordinates research, writing, and editing.
You have access to:
- research_tool: For in-depth topic research and insights
- writing_tool: For professional content creation (specify style: professional, casual, academic, etc.)
- editing_tool: For content review and improvement
When users request content:
1. First use research_tool to gather comprehensive information
2. Then use writing_tool to create well-structured content
3. Finally use editing_tool to polish and improve the final piece
Coordinate all three tools to create high-quality, well-researched content.
""",
    tools=[research_tool, writing_tool, editing_tool]
)
# Example 1: Basic content creation workflow
async def basic_content_workflow():
    """Demonstrates basic orchestration using agents as tools"""
    print("=== Basic Content Creation Workflow ===")
    # The orchestrator decides internally which tools to call and in what order.
    result = await Runner.run(
        content_orchestrator,
        """Create a comprehensive article about the benefits of renewable energy.
I need it to be professional and well-researched, suitable for a business audience."""
    )
    print(f"Final article: {result.final_output}")
    return result

# Example 2: Custom workflow with specific requirements
async def custom_workflow_example():
    """Shows orchestrator handling specific workflow requirements"""
    print("\n=== Custom Workflow with Specific Requirements ===")
    result = await Runner.run(
        content_orchestrator,
        """I need content about artificial intelligence in healthcare for a technical blog.
Make sure to:
1. Research current AI applications in medical diagnosis
2. Write in an accessible but technical style
3. Include both benefits and challenges
4. Keep it under 500 words
Please go through the full research -> write -> edit process."""
    )
    print(f"Technical blog post: {result.final_output}")
    return result

# Example 3: Comparison with direct agent orchestration
async def direct_orchestration_comparison():
    """Compares agents-as-tools vs direct orchestration"""
    print("\n=== Direct Orchestration (Manual) ===")
    topic = "The future of remote work"
    # Manual orchestration: call each agent directly, threading each phase's
    # output into the next phase's prompt.
    print("Step 1: Research...")
    research_result = await Runner.run(
        research_agent,
        f"Research trends and predictions about: {topic}"
    )
    print("Step 2: Writing...")
    writing_result = await Runner.run(
        writing_agent,
        f"Write a professional article based on this research: {research_result.final_output}"
    )
    print("Step 3: Editing...")
    editing_result = await Runner.run(
        editing_agent,
        f"Edit and improve this article: {writing_result.final_output}"
    )
    print(f"Manual orchestration result: {editing_result.final_output}")
    print("\n=== Agents-as-Tools Orchestration (Automatic) ===")
    # Automatic orchestration: the orchestrator agent sequences the same phases.
    orchestrated_result = await Runner.run(
        content_orchestrator,
        f"Create a professional article about: {topic}. Go through research, writing, and editing."
    )
    print(f"Automatic orchestration result: {orchestrated_result.final_output}")
    return editing_result, orchestrated_result
# Example 4: Advanced orchestrator with conditional logic
async def advanced_orchestrator_example():
    """Shows more sophisticated orchestration logic"""
    print("\n=== Advanced Orchestrator with Conditional Logic ===")
    # The conditional workflow rules live in the prompt, not in Python code:
    # the model decides which tools to use per request.
    advanced_orchestrator = Agent(
        name="Advanced Content Orchestrator",
        instructions="""
You are an intelligent content orchestrator that adapts workflows based on requirements.
Available tools:
- research_tool: For topic research
- writing_tool: For content creation (styles: professional, casual, academic, creative)
- editing_tool: For content improvement
Workflow decisions:
- For complex/technical topics: Do extra research first
- For creative content: Use creative writing style
- For short content: Skip detailed research
- For business content: Always edit for professionalism
- Always explain your workflow decisions
Adapt your approach based on the specific request.
""",
        tools=[research_tool, writing_tool, editing_tool]
    )
    # Three request types chosen to exercise different conditional branches.
    requests = [
        "Write a quick social media post about coffee benefits",
        "Create a detailed technical whitepaper on blockchain security",
        "Write a creative story about a robot learning to paint"
    ]
    for i, request in enumerate(requests, 1):
        print(f"\nRequest {i}: {request}")
        result = await Runner.run(advanced_orchestrator, request)
        print(f"Result: {result.final_output}")
        print("-" * 50)
    return requests

# Main execution: run all four demos sequentially.
async def main():
    print("🔧 OpenAI Agents SDK - Agents as Tools Orchestration")
    print("=" * 60)
    await basic_content_workflow()
    await custom_workflow_example()
    await direct_orchestration_comparison()
    await advanced_orchestrator_example()
    print("\n✅ Agents as tools tutorial complete!")
    print("Agents as tools enable sophisticated workflow orchestration with intelligent coordination")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_1_parallel_execution/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_1_parallel_execution/__init__.py | # Parallel Execution module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_1_parallel_execution/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/9_multi_agent_orchestration/9_1_parallel_execution/agent.py | import asyncio
from agents import Agent, ItemHelpers, Runner, trace
# Translation agent sampled multiple times in parallel to produce candidates.
spanish_agent = Agent(
    name="Spanish Translator",
    instructions="You translate the user's message to Spanish. Provide natural, fluent translations."
)

# Judge agent that picks the best candidate translation.
translation_picker = Agent(
    name="Translation Quality Picker",
    instructions="""
You are an expert in Spanish translations.
Given multiple Spanish translation options, pick the most natural, accurate, and fluent one.
Explain briefly why you chose that translation.
"""
)

# Example 1: Basic parallel execution with quality selection
async def parallel_translation_example():
    """Demonstrates running the same agent multiple times in parallel for quality"""
    print("=== Parallel Translation with Quality Selection ===")
    msg = "Hello, how are you today? I hope you're having a wonderful time!"
    print(f"Original message: {msg}")
    # Ensure the entire workflow is recorded as a single trace.
    with trace("Parallel Translation Workflow") as workflow_trace:
        print("Running 3 parallel translation attempts...")
        # Run 3 independent translation samples concurrently.
        res_1, res_2, res_3 = await asyncio.gather(
            Runner.run(spanish_agent, msg),
            Runner.run(spanish_agent, msg),
            Runner.run(spanish_agent, msg)
        )
        # Extract plain-text outputs from each run's items.
        outputs = [
            ItemHelpers.text_message_outputs(res_1.new_items),
            ItemHelpers.text_message_outputs(res_2.new_items),
            ItemHelpers.text_message_outputs(res_3.new_items)
        ]
        # Number and combine all candidates for side-by-side comparison.
        translations = "\n\n".join([f"Translation {i+1}: {output}" for i, output in enumerate(outputs)])
        print(f"\nAll translations:\n{translations}")
        # Ask the picker agent to select the best candidate.
        best_translation = await Runner.run(
            translation_picker,
            f"Original English: {msg}\n\nTranslations to choose from:\n{translations}"
        )
        print(f"\nBest translation selected: {best_translation.final_output}")
        print(f"Workflow trace ID: {workflow_trace.trace_id}")
    return best_translation
# Example 2: Parallel execution with different specialized agents
async def parallel_specialized_agents():
    """Shows parallel execution with different agents for diverse perspectives"""
    print("\n=== Parallel Execution with Specialized Agents ===")
    # Three translators with different registers / regional flavors.
    formal_translator = Agent(
        name="Formal Spanish Translator",
        instructions="Translate to formal, polite Spanish using 'usted' forms."
    )
    casual_translator = Agent(
        name="Casual Spanish Translator",
        instructions="Translate to casual, friendly Spanish using 'tú' forms."
    )
    regional_translator = Agent(
        name="Mexican Spanish Translator",
        instructions="Translate to Mexican Spanish with regional expressions and vocabulary."
    )
    msg = "Hey friend, want to grab some coffee later?"
    print(f"Original message: {msg}")
    # Group the whole comparison under one trace.
    with trace("Multi-Style Translation") as style_trace:
        print("Running parallel translations with different styles...")
        # Run the three stylistic variants concurrently.
        formal_result, casual_result, regional_result = await asyncio.gather(
            Runner.run(formal_translator, msg),
            Runner.run(casual_translator, msg),
            Runner.run(regional_translator, msg)
        )
        # Extract plain-text outputs.
        formal_text = ItemHelpers.text_message_outputs(formal_result.new_items)
        casual_text = ItemHelpers.text_message_outputs(casual_result.new_items)
        regional_text = ItemHelpers.text_message_outputs(regional_result.new_items)
        print(f"\nFormal style: {formal_text}")
        print(f"Casual style: {casual_text}")
        print(f"Regional style: {regional_text}")
        # Summarize all variants so the picker agent can compare them.
        style_comparison = f"""
Original: {msg}
Formal Spanish: {formal_text}
Casual Spanish: {casual_text}
Mexican Spanish: {regional_text}
"""
        style_recommendation = await Runner.run(
            translation_picker,
            f"Compare these translation styles and recommend which is most appropriate for the context: {style_comparison}"
        )
        print(f"\nStyle recommendation: {style_recommendation.final_output}")
        print(f"Multi-style trace ID: {style_trace.trace_id}")
    return style_recommendation
# Example 3: Parallel execution for content generation diversity
async def parallel_content_generation():
    """Demonstrates parallel content generation for creative diversity"""
    print("\n=== Parallel Content Generation for Diversity ===")
    # Writers with three distinct rhetorical approaches.
    creative_agent = Agent(
        name="Creative Writer",
        instructions="Write creative, engaging content with vivid imagery and storytelling."
    )
    informative_agent = Agent(
        name="Informative Writer",
        instructions="Write clear, factual, informative content focused on key information."
    )
    persuasive_agent = Agent(
        name="Persuasive Writer",
        instructions="Write compelling, persuasive content that motivates action."
    )
    topic = "The benefits of learning a new language"
    print(f"Content topic: {topic}")
    with trace("Diverse Content Generation") as content_trace:
        print("Generating content with different writing styles in parallel...")
        # Generate the three approaches simultaneously.
        creative_result, informative_result, persuasive_result = await asyncio.gather(
            Runner.run(creative_agent, f"Write a short paragraph about: {topic}"),
            Runner.run(informative_agent, f"Write a short paragraph about: {topic}"),
            Runner.run(persuasive_agent, f"Write a short paragraph about: {topic}")
        )
        # Extract plain-text content from each run.
        creative_content = ItemHelpers.text_message_outputs(creative_result.new_items)
        informative_content = ItemHelpers.text_message_outputs(informative_result.new_items)
        persuasive_content = ItemHelpers.text_message_outputs(persuasive_result.new_items)
        print(f"\nCreative approach:\n{creative_content}")
        print(f"\nInformative approach:\n{informative_content}")
        print(f"\nPersuasive approach:\n{persuasive_content}")
        # A fourth agent merges the best elements of all three drafts.
        synthesis_agent = Agent(
            name="Content Synthesizer",
            instructions="Combine the best elements from multiple content pieces into one cohesive, high-quality paragraph."
        )
        combined_content = f"""
Topic: {topic}
Creative version: {creative_content}
Informative version: {informative_content}
Persuasive version: {persuasive_content}
"""
        synthesized_result = await Runner.run(
            synthesis_agent,
            f"Create the best possible paragraph by combining elements from these approaches: {combined_content}"
        )
        print(f"\nSynthesized content: {synthesized_result.final_output}")
        print(f"Content generation trace ID: {content_trace.trace_id}")
    return synthesized_result

# Main execution: run all three parallel-execution demos.
async def main():
    print("🎼 OpenAI Agents SDK - Parallel Multi-Agent Execution")
    print("=" * 60)
    await parallel_translation_example()
    await parallel_specialized_agents()
    await parallel_content_generation()
    print("\n✅ Parallel execution tutorial complete!")
    print("Parallel execution enables quality improvement through diversity and selection")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/agent_runner.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/agent_runner.py | import streamlit as st
import asyncio
import time
import json
from datetime import datetime
from agents import Agent, Runner, RunConfig, SQLiteSession
from agents.exceptions import (
AgentsException,
MaxTurnsExceeded,
ModelBehaviorError,
UserError
)
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Page configuration: must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Agent Runner Demo",
    page_icon="🚀",
    layout="wide",
    initial_sidebar_state="expanded"
)
# Initialize agents once per process; st.cache_resource memoizes the result
# so the Agent objects are shared across Streamlit reruns.
@st.cache_resource
def initialize_agents():
    """Initialize agents for different demonstrations"""
    # Demonstrates sync / async / streaming execution patterns.
    execution_agent = Agent(
        name="Execution Demo Agent",
        instructions="""
You are a helpful assistant demonstrating different execution patterns.
Provide clear, informative responses that help users understand:
- Synchronous execution (blocking)
- Asynchronous execution (non-blocking)
- Streaming execution (real-time)
Keep responses appropriate for the execution method being demonstrated.
"""
    )
    # Conversational agent; presumably paired with SQLiteSession for
    # cross-turn memory — confirm against the demo code further down.
    conversation_agent = Agent(
        name="Conversation Agent",
        instructions="You are a helpful assistant that remembers conversation context. Reply concisely but reference previous context when relevant."
    )
    # Used by the RunConfig demonstration.
    config_agent = Agent(
        name="Configuration Demo Agent",
        instructions="You are a helpful assistant that demonstrates run configuration options. Be precise and informative."
    )
    # Produces long-form responses suited to token-streaming demos.
    streaming_agent = Agent(
        name="Streaming Demo Agent",
        instructions="""
You are a helpful assistant that demonstrates streaming capabilities.
When asked to write long content, be comprehensive and detailed.
When asked technical questions, provide thorough explanations.
"""
    )
    return execution_agent, conversation_agent, config_agent, streaming_agent
# Session management
class StreamingCapture:
def __init__(self):
self.events = []
self.content = ""
self.start_time = None
self.end_time = None
def reset(self):
self.events = []
self.content = ""
self.start_time = None
self.end_time = None
# Initialize session state
if 'session_manager' not in st.session_state:
st.session_state.session_manager = {}
if 'streaming_capture' not in st.session_state:
st.session_state.streaming_capture = StreamingCapture()
# Main UI
def main():
st.title("🚀 Agent Runner Demo")
st.markdown("**Demonstrates OpenAI Agents SDK execution capabilities**")
# Initialize agents
execution_agent, conversation_agent, config_agent, streaming_agent = initialize_agents()
# Sidebar for configuration
with st.sidebar:
st.header("⚙️ Execution Configuration")
demo_type = st.selectbox(
"Select Demo Type",
["Execution Methods", "Conversation Management", "Run Configuration", "Streaming Events", "Exception Handling"]
)
st.divider()
# Global settings
st.subheader("Global Settings")
# Model configuration
model_choice = st.selectbox(
"Model",
["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"],
index=0
)
temperature = st.slider(
"Temperature",
min_value=0.0,
max_value=2.0,
value=0.7,
step=0.1
)
max_turns = st.number_input(
"Max Turns",
min_value=1,
max_value=20,
value=10
)
# Main content area
if demo_type == "Execution Methods":
render_execution_methods(execution_agent, model_choice, temperature, max_turns)
elif demo_type == "Conversation Management":
render_conversation_management(conversation_agent, model_choice, temperature, max_turns)
elif demo_type == "Run Configuration":
render_run_configuration(config_agent, model_choice, temperature, max_turns)
elif demo_type == "Streaming Events":
render_streaming_events(streaming_agent, model_choice, temperature, max_turns)
elif demo_type == "Exception Handling":
render_exception_handling(execution_agent, model_choice, temperature, max_turns)
def render_execution_methods(agent, model_choice, temperature, max_turns):
"""Render the execution methods demo"""
st.header("⚡ Execution Methods Demo")
st.markdown("Compare synchronous, asynchronous, and streaming execution patterns.")
col1, col2, col3 = st.columns(3)
with col1:
st.subheader("🔄 Synchronous (Blocking)")
st.caption("Runner.run_sync() - Blocks until complete")
with st.form("sync_form"):
sync_input = st.text_area("Your message:", key="sync_input", value="Explain synchronous execution in simple terms")
sync_submitted = st.form_submit_button("Run Sync")
if sync_submitted and sync_input:
with st.spinner("Processing synchronously..."):
start_time = time.time()
try:
result = Runner.run_sync(agent, sync_input)
execution_time = time.time() - start_time
st.success(f"✅ Completed in {execution_time:.2f}s")
st.write("**Response:**")
st.write(result.final_output)
except Exception as e:
st.error(f"❌ Error: {e}")
with col2:
st.subheader("⚡ Asynchronous (Non-blocking)")
st.caption("Runner.run() - Returns awaitable")
with st.form("async_form"):
async_input = st.text_area("Your message:", key="async_input", value="Explain asynchronous execution benefits")
async_submitted = st.form_submit_button("Run Async")
if async_submitted and async_input:
with st.spinner("Processing asynchronously..."):
start_time = time.time()
try:
result = asyncio.run(Runner.run(agent, async_input))
execution_time = time.time() - start_time
st.success(f"✅ Completed in {execution_time:.2f}s")
st.write("**Response:**")
st.write(result.final_output)
except Exception as e:
st.error(f"❌ Error: {e}")
with col3:
st.subheader("🌊 Streaming (Real-time)")
st.caption("Runner.run_streamed() - Live updates")
with st.form("streaming_form"):
streaming_input = st.text_area("Your message:", key="streaming_input", value="Write a detailed explanation of streaming execution")
streaming_submitted = st.form_submit_button("Run Streaming")
if streaming_submitted and streaming_input:
st.info("🔄 Streaming response...")
# Create containers for streaming output
response_container = st.empty()
progress_container = st.empty()
try:
full_response = ""
start_time = time.time()
async def stream_response():
nonlocal full_response
async for event in Runner.run_streamed(agent, streaming_input):
if hasattr(event, 'content') and event.content:
full_response += event.content
response_container.write(f"**Response:**\n{full_response}")
execution_time = time.time() - start_time
progress_container.success(f"✅ Streaming completed in {execution_time:.2f}s")
asyncio.run(stream_response())
except Exception as e:
st.error(f"❌ Streaming error: {e}")
def render_conversation_management(agent, model_choice, temperature, max_turns):
"""Render the conversation management demo"""
st.header("💬 Conversation Management Demo")
st.markdown("Compare manual conversation threading vs automatic session management.")
tab1, tab2 = st.tabs(["Manual Threading", "Session Management"])
with tab1:
st.subheader("🔧 Manual Conversation Threading")
st.caption("Using result.to_input_list() for conversation history")
# Initialize conversation history in session state
if 'manual_conversation' not in st.session_state:
st.session_state.manual_conversation = []
with st.form("manual_form"):
manual_input = st.text_input("Your message:")
manual_submitted = st.form_submit_button("Send Message")
if manual_submitted and manual_input:
with st.spinner("Processing..."):
try:
# Build input list manually
input_list = st.session_state.manual_conversation.copy()
input_list.append({"role": "user", "content": manual_input})
result = asyncio.run(Runner.run(agent, input_list))
# Update conversation history
st.session_state.manual_conversation = result.to_input_list()
st.success("Message sent!")
st.write(f"**Assistant:** {result.final_output}")
except Exception as e:
st.error(f"❌ Error: {e}")
# Show conversation history
if st.button("📋 Show Manual History"):
if st.session_state.manual_conversation:
st.write("**Conversation History:**")
for i, item in enumerate(st.session_state.manual_conversation, 1):
role_emoji = "👤" if item['role'] == 'user' else "🤖"
st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
else:
st.info("No conversation history yet.")
if st.button("🗑️ Clear Manual History"):
st.session_state.manual_conversation = []
st.success("Manual conversation history cleared!")
with tab2:
st.subheader("🔄 Automatic Session Management")
st.caption("Using SQLiteSession for automatic conversation memory")
session_id = "demo_conversation"
with st.form("session_form"):
session_input = st.text_input("Your message:")
session_submitted = st.form_submit_button("Send Message")
if session_submitted and session_input:
with st.spinner("Processing..."):
try:
# Get or create session
if session_id not in st.session_state.session_manager:
st.session_state.session_manager[session_id] = SQLiteSession(session_id)
session = st.session_state.session_manager[session_id]
result = asyncio.run(Runner.run(agent, session_input, session=session))
st.success("Message sent!")
st.write(f"**Assistant:** {result.final_output}")
except Exception as e:
st.error(f"❌ Error: {e}")
# Show session history
if st.button("📋 Show Session History"):
if session_id in st.session_state.session_manager:
session = st.session_state.session_manager[session_id]
try:
items = asyncio.run(session.get_items())
if items:
st.write("**Session History:**")
for i, item in enumerate(items, 1):
role_emoji = "👤" if item['role'] == 'user' else "🤖"
st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
else:
st.info("No session history yet.")
except Exception as e:
st.error(f"❌ Error retrieving history: {e}")
else:
st.info("No session created yet.")
if st.button("🗑️ Clear Session History"):
if session_id in st.session_state.session_manager:
try:
session = st.session_state.session_manager[session_id]
asyncio.run(session.clear_session())
del st.session_state.session_manager[session_id]
st.success("Session history cleared!")
except Exception as e:
st.error(f"❌ Error clearing session: {e}")
def render_run_configuration(agent, model_choice, temperature, max_turns):
"""Render the run configuration demo"""
st.header("⚙️ Run Configuration Demo")
st.markdown("Demonstrates advanced run configuration options with RunConfig.")
col1, col2 = st.columns(2)
with col1:
st.subheader("🎛️ Basic Configuration")
with st.form("basic_config_form"):
st.write("**Model Settings:**")
config_temperature = st.slider("Temperature", 0.0, 2.0, 0.1, 0.1, key="config_temp")
config_top_p = st.slider("Top P", 0.0, 1.0, 0.9, 0.1, key="config_top_p")
config_max_turns = st.number_input("Max Turns", 1, 20, 5, key="config_turns")
config_input = st.text_area("Your message:", value="Explain the weather in exactly 3 sentences.")
config_submitted = st.form_submit_button("Run with Config")
if config_submitted and config_input:
with st.spinner("Processing with configuration..."):
try:
run_config = RunConfig(
model=model_choice,
model_settings={
"temperature": config_temperature,
"top_p": config_top_p
},
max_turns=config_max_turns,
workflow_name="basic_config_demo"
)
start_time = time.time()
result = asyncio.run(Runner.run(agent, config_input, run_config=run_config))
execution_time = time.time() - start_time
st.success(f"✅ Completed in {execution_time:.2f}s")
st.write("**Response:**")
st.write(result.final_output)
# Show configuration used
st.write("**Configuration Used:**")
st.json({
"model": model_choice,
"temperature": config_temperature,
"top_p": config_top_p,
"max_turns": config_max_turns
})
except Exception as e:
st.error(f"❌ Error: {e}")
with col2:
st.subheader("📊 Tracing Configuration")
with st.form("tracing_config_form"):
st.write("**Tracing Settings:**")
workflow_name = st.text_input("Workflow Name", value="production_workflow")
group_id = st.text_input("Group ID", value="user_session_456")
user_id = st.text_input("User ID", value="user_123")
feature_name = st.text_input("Feature", value="chat_assistance")
tracing_input = st.text_area("Your message:", value="What are the benefits of structured logging?")
tracing_submitted = st.form_submit_button("Run with Tracing")
if tracing_submitted and tracing_input:
with st.spinner("Processing with tracing..."):
try:
run_config = RunConfig(
model=model_choice,
tracing_disabled=False,
trace_include_sensitive_data=False,
workflow_name=workflow_name,
group_id=group_id,
trace_metadata={
"user_id": user_id,
"feature": feature_name,
"timestamp": datetime.now().isoformat()
}
)
start_time = time.time()
result = asyncio.run(Runner.run(agent, tracing_input, run_config=run_config))
execution_time = time.time() - start_time
st.success(f"✅ Completed with tracing in {execution_time:.2f}s")
st.write("**Response:**")
st.write(result.final_output)
# Show tracing configuration
st.write("**Tracing Configuration:**")
st.json({
"workflow_name": workflow_name,
"group_id": group_id,
"metadata": {
"user_id": user_id,
"feature": feature_name
}
})
except Exception as e:
st.error(f"❌ Error: {e}")
def render_streaming_events(agent, model_choice, temperature, max_turns):
"""Render the streaming events demo"""
st.header("🌊 Streaming Events Demo")
st.markdown("Demonstrates advanced streaming event processing and real-time analytics.")
tab1, tab2 = st.tabs(["Basic Streaming", "Advanced Analytics"])
with tab1:
st.subheader("🎯 Basic Streaming with Event Processing")
with st.form("streaming_basic_form"):
streaming_input = st.text_area(
"Your message:",
value="Write a comprehensive explanation of how machine learning works, including examples."
)
streaming_submitted = st.form_submit_button("Start Streaming")
if streaming_submitted and streaming_input:
st.info("🔄 Streaming in progress...")
# Create containers
response_container = st.empty()
stats_container = st.empty()
try:
full_response = ""
events_count = 0
start_time = time.time()
async def process_streaming():
nonlocal full_response, events_count
async for event in Runner.run_streamed(agent, streaming_input):
events_count += 1
if hasattr(event, 'content') and event.content:
full_response += event.content
# Update display
response_container.write(f"**Response:**\n{full_response}")
# Update stats
elapsed = time.time() - start_time
char_count = len(full_response)
word_count = len(full_response.split())
stats_container.metric(
label="Streaming Progress",
value=f"{char_count} chars, {word_count} words",
delta=f"{elapsed:.1f}s elapsed"
)
asyncio.run(process_streaming())
final_time = time.time() - start_time
st.success(f"✅ Streaming completed! {events_count} events in {final_time:.2f}s")
except Exception as e:
st.error(f"❌ Streaming error: {e}")
with tab2:
st.subheader("📈 Advanced Streaming Analytics")
with st.form("streaming_analytics_form"):
analytics_input = st.text_area(
"Your message:",
value="Explain the benefits and challenges of renewable energy in detail."
)
analytics_submitted = st.form_submit_button("Stream with Analytics")
if analytics_submitted and analytics_input:
st.info("🔄 Streaming with analytics...")
# Create analytics containers
response_container = st.empty()
metrics_col1, metrics_col2, metrics_col3 = st.columns(3)
try:
analytics = {
"chunks": [],
"chunk_sizes": [],
"timestamps": [],
"content": ""
}
start_time = time.time()
async def process_analytics_streaming():
async for event in Runner.run_streamed(agent, analytics_input):
current_time = time.time()
if hasattr(event, 'content') and event.content:
# Collect analytics
analytics["chunks"].append(event.content)
analytics["chunk_sizes"].append(len(event.content))
analytics["timestamps"].append(current_time - start_time)
analytics["content"] += event.content
# Update display
response_container.write(f"**Response:**\n{analytics['content']}")
# Update metrics
with metrics_col1:
st.metric("Chunks", len(analytics["chunks"]))
with metrics_col2:
avg_chunk_size = sum(analytics["chunk_sizes"]) / len(analytics["chunk_sizes"])
st.metric("Avg Chunk Size", f"{avg_chunk_size:.1f} chars")
with metrics_col3:
elapsed = current_time - start_time
if elapsed > 0:
chars_per_sec = len(analytics["content"]) / elapsed
st.metric("Speed", f"{chars_per_sec:.1f} chars/s")
asyncio.run(process_analytics_streaming())
# Final analytics
total_time = time.time() - start_time
total_words = len(analytics["content"].split())
st.success(f"✅ Analytics complete!")
# Display final analytics
st.write("**Final Analytics:**")
col1, col2, col3, col4 = st.columns(4)
with col1:
st.metric("Total Time", f"{total_time:.2f}s")
with col2:
st.metric("Total Words", total_words)
with col3:
st.metric("Total Chunks", len(analytics["chunks"]))
with col4:
if total_time > 0:
st.metric("Words/Second", f"{total_words/total_time:.1f}")
except Exception as e:
st.error(f"❌ Analytics streaming error: {e}")
def render_exception_handling(agent, model_choice, temperature, max_turns):
"""Render the exception handling demo"""
st.header("⚠️ Exception Handling Demo")
st.markdown("Demonstrates proper exception handling for different SDK error scenarios.")
col1, col2 = st.columns(2)
with col1:
st.subheader("🚫 MaxTurns Exception")
st.caption("Trigger MaxTurnsExceeded exception")
with st.form("maxturns_form"):
max_turns_test = st.number_input("Max Turns (set low to trigger)", 1, 5, 2)
maxturns_input = st.text_area(
"Your message:",
value="Keep asking me questions and I'll keep responding. Let's have a long conversation."
)
maxturns_submitted = st.form_submit_button("Test MaxTurns")
if maxturns_submitted and maxturns_input:
try:
run_config = RunConfig(max_turns=max_turns_test)
result = asyncio.run(Runner.run(agent, maxturns_input, run_config=run_config))
st.success("✅ Completed without hitting max turns")
st.write(f"**Response:** {result.final_output}")
except MaxTurnsExceeded as e:
st.warning(f"⚠️ MaxTurnsExceeded: {e}")
st.info("This is expected when max_turns is set too low for complex conversations.")
except Exception as e:
st.error(f"❌ Unexpected error: {e}")
with col2:
st.subheader("🔧 General Exception Handling")
st.caption("Comprehensive exception handling")
with st.form("exception_form"):
exception_input = st.text_area("Your message:", value="Tell me about artificial intelligence")
exception_submitted = st.form_submit_button("Test Exception Handling")
if exception_submitted and exception_input:
try:
with st.spinner("Processing with full exception handling..."):
result = asyncio.run(Runner.run(agent, exception_input))
st.success("✅ Successfully processed")
st.write(f"**Response:** {result.final_output}")
except MaxTurnsExceeded as e:
st.warning(f"⚠️ Hit maximum turns limit: {e}")
st.info("Consider increasing max_turns or simplifying the request.")
except ModelBehaviorError as e:
st.error(f"🤖 Model behavior error: {e}")
st.info("The model produced unexpected output. Try rephrasing your request.")
except UserError as e:
st.error(f"👤 User error: {e}")
st.info("There's an issue with the request. Please check your input.")
except AgentsException as e:
st.error(f"🔧 SDK error: {e}")
st.info("An error occurred within the Agents SDK.")
except Exception as e:
st.error(f"❌ Unexpected error: {e}")
st.info("An unexpected error occurred. Please try again.")
# Exception handling reference
st.divider()
st.subheader("📚 Exception Handling Reference")
exception_info = {
"MaxTurnsExceeded": "Agent hit the maximum conversation turns limit",
"ModelBehaviorError": "LLM produced malformed or unexpected output",
"UserError": "Invalid SDK usage or request parameters",
"AgentsException": "Base exception for all SDK-related errors",
"InputGuardrailTripwireTriggered": "Input validation failed",
"OutputGuardrailTripwireTriggered": "Output validation failed"
}
for exception, description in exception_info.items():
st.write(f"**{exception}**: {description}")
# Footer
def render_footer():
st.divider()
st.markdown("""
### 🎯 Agent Runner Capabilities Demonstrated
1. **Execution Methods**: Sync, async, and streaming execution patterns
2. **Conversation Management**: Manual threading vs automatic sessions
3. **Run Configuration**: Model settings, tracing, and workflow management
4. **Streaming Events**: Real-time processing and analytics
5. **Exception Handling**: Comprehensive error handling patterns
**Key Benefits:**
- Flexible execution patterns for different use cases
- Automatic conversation memory with sessions
- Advanced configuration for production deployments
- Real-time streaming for better user experience
- Robust error handling for production reliability
""")
if __name__ == "__main__":
main()
render_footer()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_1_execution_methods/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_1_execution_methods/__init__.py | # Execution Methods Package
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_1_execution_methods/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_1_execution_methods/agent.py | from agents import Agent, Runner
import asyncio
# Create a simple agent for demonstrating execution methods
root_agent = Agent(
name="Execution Demo Agent",
instructions="""
You are a helpful assistant demonstrating different execution patterns.
Provide clear, informative responses that help users understand:
- Synchronous execution (blocking)
- Asynchronous execution (non-blocking)
- Streaming execution (real-time)
Keep responses appropriate for the execution method being demonstrated.
"""
)
# Example 1: Synchronous execution
def sync_execution_example():
"""Demonstrates Runner.run_sync() - blocking execution"""
result = Runner.run_sync(root_agent, "Explain synchronous execution in simple terms")
return result.final_output
# Example 2: Asynchronous execution
async def async_execution_example():
"""Demonstrates Runner.run() - non-blocking execution"""
result = await Runner.run(root_agent, "Explain asynchronous execution benefits")
return result.final_output
# Example 3: Streaming execution
async def streaming_execution_example():
"""Demonstrates Runner.run_streamed() - real-time streaming"""
full_response = ""
async for event in Runner.run_streamed(root_agent, "Write a detailed explanation of streaming execution"):
# Handle streaming events as they arrive
if hasattr(event, 'content') and event.content:
full_response += event.content
print(event.content, end='', flush=True) # Print in real-time
print() # New line after streaming
return full_response
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_2_conversation_management/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_2_conversation_management/__init__.py | # Conversation Management Package
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_2_conversation_management/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_2_conversation_management/agent.py | from agents import Agent, Runner, SQLiteSession
# Create an agent for demonstrating conversation management
root_agent = Agent(
name="Conversation Agent",
instructions="You are a helpful assistant that remembers conversation context. Reply concisely but reference previous context when relevant."
)
# Example 1: Manual conversation management
async def manual_conversation_example():
"""Demonstrates manual conversation management using result.to_input_list()"""
# First turn
result = await Runner.run(root_agent, "My name is Alice and I live in San Francisco.")
print(f"Turn 1: {result.final_output}")
# Second turn - manually pass conversation history
new_input = result.to_input_list() + [{"role": "user", "content": "What city do I live in?"}]
result = await Runner.run(root_agent, new_input)
print(f"Turn 2: {result.final_output}")
return result
# Example 2: Automatic conversation management with Sessions
async def session_conversation_example():
"""Demonstrates automatic conversation management using SQLiteSession"""
# Create session instance
session = SQLiteSession("conversation_123")
# First turn
result = await Runner.run(root_agent, "I'm a software developer working on AI projects.", session=session)
print(f"Session Turn 1: {result.final_output}")
# Second turn - session automatically remembers context
result = await Runner.run(root_agent, "What kind of work do I do?", session=session)
print(f"Session Turn 2: {result.final_output}")
return result
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_3_run_configuration/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_3_run_configuration/__init__.py | # Run Configuration Package
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_3_run_configuration/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_3_run_configuration/agent.py | from agents import Agent, Runner, RunConfig
# Create an agent for demonstrating run configuration
root_agent = Agent(
name="Configuration Demo Agent",
instructions="You are a helpful assistant that demonstrates run configuration options."
)
# Example 1: Basic run configuration with model settings
async def model_config_example():
"""Demonstrates run configuration with model overrides and settings"""
run_config = RunConfig(
model="gpt-4o", # Override agent's default model
model_settings={
"temperature": 0.1, # Low temperature for consistent responses
"top_p": 0.9
},
max_turns=5, # Limit conversation turns
workflow_name="demo_workflow", # For tracing
trace_metadata={"experiment": "config_demo"}
)
result = await Runner.run(
root_agent,
"Explain the weather in exactly 3 sentences.",
run_config=run_config
)
return result.final_output
# Example 2: Run configuration with tracing settings
async def tracing_config_example():
"""Demonstrates run configuration with tracing options"""
run_config = RunConfig(
tracing_disabled=False, # Enable tracing
trace_include_sensitive_data=False, # Exclude sensitive data
workflow_name="production_workflow",
group_id="user_session_456", # Link multiple runs
trace_metadata={
"user_id": "user_123",
"feature": "chat_assistance"
}
)
result = await Runner.run(
root_agent,
"What are the benefits of structured logging?",
run_config=run_config
)
return result.final_output
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_4_streaming_events/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_4_streaming_events/__init__.py | # Streaming Events module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_4_streaming_events/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/4_running_agents/4_4_streaming_events/agent.py | from agents import Agent, Runner
import asyncio
import time
# Create agents for demonstrating streaming events
root_agent = Agent(
name="Streaming Demo Agent",
instructions="""
You are a helpful assistant that demonstrates streaming capabilities.
When asked to write long content, be comprehensive and detailed.
When asked technical questions, provide thorough explanations.
"""
)
# Example 1: Basic streaming with event processing
async def basic_streaming_example():
"""Demonstrates basic streaming event handling"""
print("=== Basic Streaming Events ===")
print("Requesting a detailed explanation...")
full_response = ""
start_time = time.time()
# Use run_streamed to get real-time events
async for event in Runner.run_streamed(
root_agent,
"Write a comprehensive explanation of how machine learning works, including examples."
):
# Process different types of streaming events
if hasattr(event, 'content') and event.content:
# This is a text content event
full_response += event.content
print(event.content, end='', flush=True)
if hasattr(event, 'type'):
# Handle different event types
if event.type == "response_start":
print(f"\n[EVENT] Response started")
elif event.type == "response_complete":
print(f"\n[EVENT] Response completed")
elapsed_time = time.time() - start_time
print(f"\n\nStreaming completed in {elapsed_time:.2f} seconds")
print(f"Total response length: {len(full_response)} characters")
return full_response
# Example 2: Advanced streaming with RunResultStreaming
async def advanced_streaming_example():
"""Shows how to work with RunResultStreaming object"""
print("\n=== Advanced Streaming with RunResultStreaming ===")
print("Generating a long story with progress tracking...")
# Track streaming progress
events_count = 0
chunks_received = []
# Get the streaming result generator
streaming_result = Runner.run_streamed(
root_agent,
"Write a creative short story about a robot who discovers emotions. Make it at least 500 words."
)
print("Processing streaming events:")
async for event in streaming_result:
events_count += 1
# Collect content chunks
if hasattr(event, 'content') and event.content:
chunks_received.append(event.content)
# Show progress every 10 chunks
if len(chunks_received) % 10 == 0:
print(f"\n[PROGRESS] Received {len(chunks_received)} chunks...")
print(event.content, end='', flush=True)
# Handle specific event types
if hasattr(event, 'type'):
if event.type == "tool_call_start":
print(f"\n[EVENT] Tool call started")
elif event.type == "tool_call_complete":
print(f"\n[EVENT] Tool call completed")
print(f"\n\nStreaming summary:")
print(f"- Total events processed: {events_count}")
print(f"- Content chunks received: {len(chunks_received)}")
print(f"- Final story length: {sum(len(chunk) for chunk in chunks_received)} characters")
# Access the final result
final_result = "".join(chunks_received)
return final_result
# Example 3: Streaming with custom processing
async def custom_streaming_processing():
"""Demonstrates custom streaming event processing"""
print("\n=== Custom Streaming Processing ===")
print("Analyzing streaming patterns...")
# Custom streaming analytics
analytics = {
"words_per_second": [],
"chunk_sizes": [],
"response_time": None,
"total_words": 0
}
start_time = time.time()
last_update = start_time
current_content = ""
async for event in Runner.run_streamed(
root_agent,
"Explain the benefits and challenges of renewable energy in detail."
):
current_time = time.time()
if hasattr(event, 'content') and event.content:
# Track chunk size
chunk_size = len(event.content)
analytics["chunk_sizes"].append(chunk_size)
# Update content
current_content += event.content
# Calculate words per second every few chunks
if len(analytics["chunk_sizes"]) % 5 == 0:
time_diff = current_time - last_update
if time_diff > 0:
words_in_chunk = len(event.content.split())
wps = words_in_chunk / time_diff
analytics["words_per_second"].append(wps)
last_update = current_time
print(event.content, end='', flush=True)
# Final analytics
analytics["response_time"] = time.time() - start_time
analytics["total_words"] = len(current_content.split())
print(f"\n\nStreaming Analytics:")
print(f"- Total response time: {analytics['response_time']:.2f} seconds")
print(f"- Total words: {analytics['total_words']}")
print(f"- Average chunk size: {sum(analytics['chunk_sizes'])/len(analytics['chunk_sizes']):.1f} chars")
if analytics["words_per_second"]:
avg_wps = sum(analytics["words_per_second"]) / len(analytics["words_per_second"])
print(f"- Average words per second: {avg_wps:.1f}")
return analytics
# Example 4: Streaming with error handling
async def streaming_with_error_handling():
    """Stream a response with per-chunk and whole-stream error handling.

    Returns the joined response text, or None if streaming fails outright.
    """
    print("\n=== Streaming with Error Handling ===")
    try:
        response_parts = []
        # NOTE(review): iterates Runner.run_streamed(...) directly; some SDK
        # versions require iterating result.stream_events() instead — confirm.
        async for event in Runner.run_streamed(
            root_agent,
            "What are the top 3 programming languages and why?"
        ):
            try:
                if hasattr(event, 'content') and event.content:
                    response_parts.append(event.content)
                    print(event.content, end='', flush=True)
            except Exception as chunk_error:
                # A bad chunk is logged and skipped; the stream carries on.
                print(f"\n[ERROR] Error processing chunk: {chunk_error}")
                continue  # Continue with next chunk
        print(f"\n\nStreaming completed successfully!")
        print(f"Collected {len(response_parts)} response parts")
        return "".join(response_parts)
    except Exception as streaming_error:
        # Whole-stream failure: report it and signal with None.
        print(f"\n[ERROR] Streaming failed: {streaming_error}")
        return None
# Main execution
async def main():
    """Run every streaming example in sequence."""
    print("🚀 OpenAI Agents SDK - Streaming Events")
    print("=" * 60)
    await basic_streaming_example()
    await advanced_streaming_example()
    await custom_streaming_processing()
    await streaming_with_error_handling()
    print("\n✅ Streaming events tutorial complete!")
    print("Streaming enables real-time response processing for better user experience")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/default_tracing.py | ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/default_tracing.py | from agents import Agent, Runner
import asyncio
# Create agent for tracing demonstrations
root_agent = Agent(
name="Tracing Demo Agent",
instructions="""
You are a helpful assistant demonstrating tracing capabilities.
Respond concisely but perform actions that generate interesting trace data.
"""
)
# Example 1: Basic automatic tracing
async def basic_automatic_tracing():
    """Demonstrate the tracing that happens with zero configuration.

    A single Runner.run() call produces one trace, viewable in the
    OpenAI Traces dashboard. Returns the RunResult.
    """
    print("=== Basic Automatic Tracing ===")
    print("Tracing is enabled by default - no setup required!")
    print("View traces at: https://platform.openai.com/traces")
    # Single agent run - creates one trace automatically
    result = await Runner.run(
        root_agent,
        "Explain what tracing means in software development."
    )
    print(f"Response: {result.final_output}")
    # NOTE(review): RunResult may not expose a `run_id` attribute in current
    # agents SDK releases — confirm before relying on it.
    print(f"Trace ID: {result.run_id}")  # Each run gets a unique ID
    print("Check the OpenAI Traces dashboard to see this execution!")
    return result
# Example 2: Multiple runs create separate traces
async def multiple_separate_traces():
    """Show that each independent Runner.run() call yields its own trace."""
    print("\n=== Multiple Separate Traces ===")
    print("Each Runner.run() call creates a separate trace")
    # First trace
    result1 = await Runner.run(
        root_agent,
        "What are the benefits of monitoring software?"
    )
    # NOTE(review): `run_id` may not exist on RunResult — confirm.
    print(f"Trace 1 ID: {result1.run_id}")
    # Second trace (separate from first)
    result2 = await Runner.run(
        root_agent,
        "How do you debug performance issues?"
    )
    print(f"Trace 2 ID: {result2.run_id}")
    print("Two separate traces created - each with its own workflow view")
    return result1, result2
# Example 3: Understanding trace structure
async def trace_structure_demo():
    """Describe what data a trace captures, then produce one to inspect."""
    print("\n=== Trace Structure Demo ===")
    print("Each trace automatically captures:")
    print("• LLM generations (input/output)")
    print("• Execution time and performance")
    print("• Any errors or exceptions")
    print("• Metadata and context")
    # Create a run that will generate rich trace data
    result = await Runner.run(
        root_agent,
        "List 3 key components of observability and explain each briefly."
    )
    print(f"Response generated: {len(result.final_output)} characters")
    # NOTE(review): `run_id` may not exist on RunResult — confirm.
    print(f"Trace contains rich data for run: {result.run_id}")
    # Show what type of information is captured
    print("\nIn the trace dashboard, you'll see:")
    print("1. Workflow timeline with duration")
    print("2. LLM generation details (model, tokens, etc.)")
    print("3. Input/output content and metadata")
    print("4. Performance metrics and execution flow")
    return result
# Example 4: Tracing configuration options
async def tracing_configuration():
    """Show how to disable tracing for a single run via RunConfig."""
    print("\n=== Tracing Configuration ===")
    # Example of disabling tracing for specific run
    from agents.run import RunConfig
    print("Running with tracing disabled...")
    result_no_trace = await Runner.run(
        root_agent,
        "This run won't be traced.",
        run_config=RunConfig(tracing_disabled=True)
    )
    # NOTE(review): `run_id` may not exist on RunResult — confirm.
    print(f"Run completed without tracing: {result_no_trace.run_id}")
    print("(This run won't appear in traces dashboard)")
    print("\nRunning with normal tracing...")
    result_with_trace = await Runner.run(
        root_agent,
        "This run will be traced normally."
    )
    print(f"Run completed with tracing: {result_with_trace.run_id}")
    print("(This run will appear in traces dashboard)")
    return result_no_trace, result_with_trace
# Main execution
async def main():
    """Run every tracing example in sequence."""
    print("🔍 OpenAI Agents SDK - Tracing Basics")
    print("=" * 50)
    await basic_automatic_tracing()
    await multiple_separate_traces()
    await trace_structure_demo()
    await tracing_configuration()
    print("\n✅ Tracing tutorial complete!")
    print("Visit https://platform.openai.com/traces to explore your traces")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/custom_tracing.py | ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/custom_tracing.py | from agents import Agent, Runner, trace, custom_span
import asyncio
# Agents used by the custom-tracing examples below: one gathers facts,
# the other interprets them.
research_agent = Agent(
    name="Research Agent",
    instructions="You are a research assistant. Provide concise, factual information."
)
analysis_agent = Agent(
    name="Analysis Agent",
    instructions="You analyze information and provide insights."
)
# Example 1: Custom trace for multi-step workflow
async def multi_step_workflow_trace():
    """Group three agent runs (research -> analysis -> summary) in one trace.

    The trace() context manager makes the three Runner.run() calls appear
    as a single workflow in the dashboard. Returns all three RunResults.
    """
    print("=== Multi-Step Workflow Trace ===")
    # Create custom trace that groups multiple operations
    with trace("Research and Analysis Workflow") as workflow_trace:
        print("Starting research phase...")
        # Step 1: Research
        research_result = await Runner.run(
            research_agent,
            "What are the key benefits of artificial intelligence in healthcare?"
        )
        print(f"Research complete: {len(research_result.final_output)} characters")
        # Step 2: Analysis is fed the raw research output.
        analysis_result = await Runner.run(
            analysis_agent,
            f"Analyze this research and identify the top 3 benefits: {research_result.final_output}"
        )
        print(f"Analysis complete: {len(analysis_result.final_output)} characters")
        # Step 3: Summary condenses the analysis.
        summary_result = await Runner.run(
            analysis_agent,
            f"Create a brief executive summary of these findings: {analysis_result.final_output}"
        )
        print(f"Summary complete: {len(summary_result.final_output)} characters")
    print(f"Workflow trace created: {workflow_trace.trace_id}")
    print("All three agent runs are grouped in a single trace!")
    return research_result, analysis_result, summary_result
# Example 2: Custom spans for business logic
async def custom_spans_demo():
    """Wrap business-logic phases in custom spans inside one trace."""
    print("\n=== Custom Spans Demo ===")
    with trace("Document Processing Workflow") as doc_trace:
        # Custom span for data preparation
        with custom_span("Data Preparation") as prep_span:
            print("Preparing data...")
            # Simulate data processing
            await asyncio.sleep(0.1)
            # NOTE(review): span objects may not expose add_event(); confirm
            # against the installed agents SDK tracing API.
            prep_span.add_event("Data loaded", {"records": 100})
            prep_span.add_event("Data validated", {"errors": 0})
        # Custom span for agent processing
        with custom_span("AI Processing") as ai_span:
            print("Processing with AI...")
            result = await Runner.run(
                research_agent,
                "Summarize the importance of data quality in AI systems."
            )
            ai_span.add_event("Processing complete", {
                "output_length": len(result.final_output),
                "model_used": "gpt-4o"
            })
        # Custom span for post-processing
        with custom_span("Post Processing") as post_span:
            print("Post-processing results...")
            await asyncio.sleep(0.1)
            post_span.add_event("Results formatted", {"format": "text"})
            post_span.add_event("Quality check passed", {"score": 0.95})
    print(f"Document processing trace: {doc_trace.trace_id}")
    print("Custom spans provide detailed workflow visibility!")
    return result
# Example 3: Hierarchical spans
async def hierarchical_spans():
    """Nest spans within spans to model a multi-phase workflow."""
    print("\n=== Hierarchical Spans ===")
    with trace("E-commerce Order Processing") as order_trace:
        with custom_span("Order Validation") as validation_span:
            print("Validating order...")
            # Nested span for inventory check
            with custom_span("Inventory Check") as inventory_span:
                await asyncio.sleep(0.05)
                # NOTE(review): span objects may not expose add_event();
                # confirm against the installed agents SDK tracing API.
                inventory_span.add_event("Stock verified", {"available": True})
            # Nested span for payment validation
            with custom_span("Payment Validation") as payment_span:
                await asyncio.sleep(0.05)
                payment_span.add_event("Payment authorized", {"amount": 99.99})
            validation_span.add_event("Order validated", {"order_id": "ORD-12345"})
        with custom_span("AI Recommendation Generation") as rec_span:
            print("Generating recommendations...")
            result = await Runner.run(
                research_agent,
                "What are good complementary products for a wireless headset purchase?"
            )
            rec_span.add_event("Recommendations generated", {
                "count": 3,
                "confidence": 0.89
            })
        with custom_span("Order Completion") as completion_span:
            print("Completing order...")
            completion_span.add_event("Shipping scheduled", {"tracking": "TRK-789"})
            completion_span.add_event("Email sent", {"type": "confirmation"})
    print(f"E-commerce order trace: {order_trace.trace_id}")
    print("Hierarchical spans show detailed operation breakdown!")
    return result
# Example 4: Trace metadata and grouping
async def trace_metadata_demo():
    """Correlate two traces as one conversation via a shared group_id.

    Metadata adds filterable context to each trace in the dashboard.
    """
    print("\n=== Trace Metadata and Grouping ===")
    # Create multiple traces with shared group ID
    conversation_id = "conv_12345"
    # First interaction in conversation
    with trace(
        "Customer Support - Initial Inquiry",
        group_id=conversation_id,
        metadata={"customer_id": "cust_789", "priority": "high"}
    ) as trace1:
        result1 = await Runner.run(
            research_agent,
            "How do I reset my password?"
        )
        # NOTE(review): Trace objects may not expose add_event(); confirm
        # against the installed agents SDK tracing API.
        trace1.add_event("Initial inquiry processed", {"category": "password_reset"})
    # Follow-up interaction in same conversation
    with trace(
        "Customer Support - Follow-up",
        group_id=conversation_id,
        metadata={"customer_id": "cust_789", "interaction": 2}
    ) as trace2:
        result2 = await Runner.run(
            analysis_agent,
            f"Based on this password reset request, what additional security measures should we recommend? Context: {result1.final_output}"
        )
        trace2.add_event("Follow-up completed", {"recommendations_provided": True})
    print(f"Conversation traces: {trace1.trace_id}, {trace2.trace_id}")
    print(f"Grouped under conversation: {conversation_id}")
    print("Metadata helps organize and filter traces in dashboard!")
    return result1, result2
# Main execution
async def main():
    """Run every custom-tracing example in sequence."""
    print("🎨 OpenAI Agents SDK - Custom Tracing")
    print("=" * 50)
    await multi_step_workflow_trace()
    await custom_spans_demo()
    await hierarchical_spans()
    await trace_metadata_demo()
    print("\n✅ Custom tracing tutorial complete!")
    print("Check the OpenAI Traces dashboard to see your custom workflow visualizations")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_2_custom_tracing/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_2_custom_tracing/__init__.py | # Custom Tracing module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_2_custom_tracing/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_2_custom_tracing/agent.py | from agents import Agent, Runner, trace, custom_span
import asyncio
# Agents used by the custom-tracing examples below: one gathers facts,
# the other interprets them.
research_agent = Agent(
    name="Research Agent",
    instructions="You are a research assistant. Provide concise, factual information."
)
analysis_agent = Agent(
    name="Analysis Agent",
    instructions="You analyze information and provide insights."
)
# Example 1: Custom trace for multi-step workflow
async def multi_step_workflow_trace():
    """Group three agent runs (research -> analysis -> summary) in one trace.

    The trace() context manager makes the three Runner.run() calls appear
    as a single workflow in the dashboard. Returns all three RunResults.
    """
    print("=== Multi-Step Workflow Trace ===")
    # Create custom trace that groups multiple operations
    with trace("Research and Analysis Workflow") as workflow_trace:
        print("Starting research phase...")
        # Step 1: Research
        research_result = await Runner.run(
            research_agent,
            "What are the key benefits of artificial intelligence in healthcare?"
        )
        print(f"Research complete: {len(research_result.final_output)} characters")
        # Step 2: Analysis is fed the raw research output.
        analysis_result = await Runner.run(
            analysis_agent,
            f"Analyze this research and identify the top 3 benefits: {research_result.final_output}"
        )
        print(f"Analysis complete: {len(analysis_result.final_output)} characters")
        # Step 3: Summary condenses the analysis.
        summary_result = await Runner.run(
            analysis_agent,
            f"Create a brief executive summary of these findings: {analysis_result.final_output}"
        )
        print(f"Summary complete: {len(summary_result.final_output)} characters")
    print(f"Workflow trace created: {workflow_trace.trace_id}")
    print("All three agent runs are grouped in a single trace!")
    return research_result, analysis_result, summary_result
# Example 2: Custom spans for business logic
async def custom_spans_demo():
    """Wrap business-logic phases in custom spans inside one trace."""
    print("\n=== Custom Spans Demo ===")
    with trace("Document Processing Workflow") as doc_trace:
        # Custom span for data preparation
        with custom_span("Data Preparation") as prep_span:
            print("Preparing data...")
            # Simulate data processing
            await asyncio.sleep(0.1)
            # NOTE(review): span objects may not expose add_event(); confirm
            # against the installed agents SDK tracing API.
            prep_span.add_event("Data loaded", {"records": 100})
            prep_span.add_event("Data validated", {"errors": 0})
        # Custom span for agent processing
        with custom_span("AI Processing") as ai_span:
            print("Processing with AI...")
            result = await Runner.run(
                research_agent,
                "Summarize the importance of data quality in AI systems."
            )
            ai_span.add_event("Processing complete", {
                "output_length": len(result.final_output),
                "model_used": "gpt-4o"
            })
        # Custom span for post-processing
        with custom_span("Post Processing") as post_span:
            print("Post-processing results...")
            await asyncio.sleep(0.1)
            post_span.add_event("Results formatted", {"format": "text"})
            post_span.add_event("Quality check passed", {"score": 0.95})
    print(f"Document processing trace: {doc_trace.trace_id}")
    print("Custom spans provide detailed workflow visibility!")
    return result
# Example 3: Hierarchical spans
async def hierarchical_spans():
    """Nest spans within spans to model a multi-phase workflow."""
    print("\n=== Hierarchical Spans ===")
    with trace("E-commerce Order Processing") as order_trace:
        with custom_span("Order Validation") as validation_span:
            print("Validating order...")
            # Nested span for inventory check
            with custom_span("Inventory Check") as inventory_span:
                await asyncio.sleep(0.05)
                # NOTE(review): span objects may not expose add_event();
                # confirm against the installed agents SDK tracing API.
                inventory_span.add_event("Stock verified", {"available": True})
            # Nested span for payment validation
            with custom_span("Payment Validation") as payment_span:
                await asyncio.sleep(0.05)
                payment_span.add_event("Payment authorized", {"amount": 99.99})
            validation_span.add_event("Order validated", {"order_id": "ORD-12345"})
        with custom_span("AI Recommendation Generation") as rec_span:
            print("Generating recommendations...")
            result = await Runner.run(
                research_agent,
                "What are good complementary products for a wireless headset purchase?"
            )
            rec_span.add_event("Recommendations generated", {
                "count": 3,
                "confidence": 0.89
            })
        with custom_span("Order Completion") as completion_span:
            print("Completing order...")
            completion_span.add_event("Shipping scheduled", {"tracking": "TRK-789"})
            completion_span.add_event("Email sent", {"type": "confirmation"})
    print(f"E-commerce order trace: {order_trace.trace_id}")
    print("Hierarchical spans show detailed operation breakdown!")
    return result
# Example 4: Trace metadata and grouping
async def trace_metadata_demo():
    """Correlate two traces as one conversation via a shared group_id.

    Metadata adds filterable context to each trace in the dashboard.
    """
    print("\n=== Trace Metadata and Grouping ===")
    # Create multiple traces with shared group ID
    conversation_id = "conv_12345"
    # First interaction in conversation
    with trace(
        "Customer Support - Initial Inquiry",
        group_id=conversation_id,
        metadata={"customer_id": "cust_789", "priority": "high"}
    ) as trace1:
        result1 = await Runner.run(
            research_agent,
            "How do I reset my password?"
        )
        # NOTE(review): Trace objects may not expose add_event(); confirm
        # against the installed agents SDK tracing API.
        trace1.add_event("Initial inquiry processed", {"category": "password_reset"})
    # Follow-up interaction in same conversation
    with trace(
        "Customer Support - Follow-up",
        group_id=conversation_id,
        metadata={"customer_id": "cust_789", "interaction": 2}
    ) as trace2:
        result2 = await Runner.run(
            analysis_agent,
            f"Based on this password reset request, what additional security measures should we recommend? Context: {result1.final_output}"
        )
        trace2.add_event("Follow-up completed", {"recommendations_provided": True})
    print(f"Conversation traces: {trace1.trace_id}, {trace2.trace_id}")
    print(f"Grouped under conversation: {conversation_id}")
    print("Metadata helps organize and filter traces in dashboard!")
    return result1, result2
# Main execution
async def main():
    """Run every custom-tracing example in sequence."""
    print("🎨 OpenAI Agents SDK - Custom Tracing")
    print("=" * 50)
    await multi_step_workflow_trace()
    await custom_spans_demo()
    await hierarchical_spans()
    await trace_metadata_demo()
    print("\n✅ Custom tracing tutorial complete!")
    print("Check the OpenAI Traces dashboard to see your custom workflow visualizations")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_1_default_tracing/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_1_default_tracing/__init__.py | # Default Tracing module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_1_default_tracing/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/10_tracing_observability/10_1_default_tracing/agent.py | from agents import Agent, Runner
import asyncio
# Create agent for tracing demonstrations
root_agent = Agent(
name="Tracing Demo Agent",
instructions="""
You are a helpful assistant demonstrating tracing capabilities.
Respond concisely but perform actions that generate interesting trace data.
"""
)
# Example 1: Basic automatic tracing
async def basic_automatic_tracing():
    """Demonstrate the tracing that happens with zero configuration.

    A single Runner.run() call produces one trace, viewable in the
    OpenAI Traces dashboard. Returns the RunResult.
    """
    print("=== Basic Automatic Tracing ===")
    print("Tracing is enabled by default - no setup required!")
    print("View traces at: https://platform.openai.com/traces")
    # Single agent run - creates one trace automatically
    result = await Runner.run(
        root_agent,
        "Explain what tracing means in software development."
    )
    print(f"Response: {result.final_output}")
    # NOTE(review): RunResult may not expose a `run_id` attribute in current
    # agents SDK releases — confirm before relying on it.
    print(f"Trace ID: {result.run_id}")  # Each run gets a unique ID
    print("Check the OpenAI Traces dashboard to see this execution!")
    return result
# Example 2: Multiple runs create separate traces
async def multiple_separate_traces():
    """Show that each independent Runner.run() call yields its own trace."""
    print("\n=== Multiple Separate Traces ===")
    print("Each Runner.run() call creates a separate trace")
    # First trace
    result1 = await Runner.run(
        root_agent,
        "What are the benefits of monitoring software?"
    )
    # NOTE(review): `run_id` may not exist on RunResult — confirm.
    print(f"Trace 1 ID: {result1.run_id}")
    # Second trace (separate from first)
    result2 = await Runner.run(
        root_agent,
        "How do you debug performance issues?"
    )
    print(f"Trace 2 ID: {result2.run_id}")
    print("Two separate traces created - each with its own workflow view")
    return result1, result2
# Example 3: Understanding trace structure
async def trace_structure_demo():
    """Describe what data a trace captures, then produce one to inspect."""
    print("\n=== Trace Structure Demo ===")
    print("Each trace automatically captures:")
    print("• LLM generations (input/output)")
    print("• Execution time and performance")
    print("• Any errors or exceptions")
    print("• Metadata and context")
    # Create a run that will generate rich trace data
    result = await Runner.run(
        root_agent,
        "List 3 key components of observability and explain each briefly."
    )
    print(f"Response generated: {len(result.final_output)} characters")
    # NOTE(review): `run_id` may not exist on RunResult — confirm.
    print(f"Trace contains rich data for run: {result.run_id}")
    # Show what type of information is captured
    print("\nIn the trace dashboard, you'll see:")
    print("1. Workflow timeline with duration")
    print("2. LLM generation details (model, tokens, etc.)")
    print("3. Input/output content and metadata")
    print("4. Performance metrics and execution flow")
    return result
# Example 4: Tracing configuration options
async def tracing_configuration():
    """Show how to disable tracing for a single run via RunConfig."""
    print("\n=== Tracing Configuration ===")
    # Example of disabling tracing for specific run
    from agents.run import RunConfig
    print("Running with tracing disabled...")
    result_no_trace = await Runner.run(
        root_agent,
        "This run won't be traced.",
        run_config=RunConfig(tracing_disabled=True)
    )
    # NOTE(review): `run_id` may not exist on RunResult — confirm.
    print(f"Run completed without tracing: {result_no_trace.run_id}")
    print("(This run won't appear in traces dashboard)")
    print("\nRunning with normal tracing...")
    result_with_trace = await Runner.run(
        root_agent,
        "This run will be traced normally."
    )
    print(f"Run completed with tracing: {result_with_trace.run_id}")
    print("(This run will appear in traces dashboard)")
    return result_no_trace, result_with_trace
# Main execution
async def main():
    """Run every tracing example in sequence."""
    print("🔍 OpenAI Agents SDK - Tracing Basics")
    print("=" * 50)
    await basic_automatic_tracing()
    await multiple_separate_traces()
    await trace_structure_demo()
    await tracing_configuration()
    print("\n✅ Tracing tutorial complete!")
    print("Visit https://platform.openai.com/traces to explore your traces")

if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/__init__.py | # Voice agents module for OpenAI Agents SDK
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/util.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/util.py | import threading
import time
from typing import Optional
import numpy as np
import sounddevice as sd
class AudioPlayer:
    """Real-time audio playback helper built on a sounddevice OutputStream.

    Intended for use as a context manager: the output stream is opened on
    entry and torn down on exit. Chunks are written synchronously with
    add_audio(); stop() prevents any further playback.
    """

    def __init__(self, sample_rate: int = 24000, channels: int = 1, dtype=np.int16):
        self.sample_rate = sample_rate
        self.channels = channels
        self.dtype = dtype
        self.stream: Optional[sd.OutputStream] = None
        self._stop_event = threading.Event()

    def __enter__(self):
        """Open and start the underlying output stream."""
        stream = sd.OutputStream(
            samplerate=self.sample_rate,
            channels=self.channels,
            dtype=self.dtype,
        )
        stream.start()
        self.stream = stream
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop and close the output stream if one was opened."""
        if self.stream:
            self.stream.stop()
            self.stream.close()

    def add_audio(self, audio_data: np.ndarray):
        """Write one chunk to the stream unless stopped; errors are logged."""
        if not self.stream or self._stop_event.is_set():
            return
        try:
            self.stream.write(audio_data)
        except Exception as e:
            print(f"[error] Failed to play audio: {e}")

    def stop(self):
        """Signal that no further audio should be played."""
        self._stop_event.set()
def record_audio(
    duration: float = 5.0,
    sample_rate: int = 24000,
    channels: int = 1,
    dtype=np.int16
) -> np.ndarray:
    """
    Record audio from the microphone for a specified duration.

    Args:
        duration: Recording duration in seconds
        sample_rate: Audio sample rate (Hz)
        channels: Number of audio channels
        dtype: Audio data type

    Returns:
        Recorded audio as numpy array (1-D when channels == 1). If recording
        fails, or is interrupted before anything was captured, returns one
        second of silence.
    """
    print(f"🎤 Recording audio for {duration} seconds... Press Ctrl+C to stop early.")
    print("Say something now!")
    start_time = time.time()  # reference point for the early-stop truncation
    try:
        # Record audio (sd.rec is non-blocking; the buffer fills in the background)
        recording = sd.rec(
            int(duration * sample_rate),
            samplerate=sample_rate,
            channels=channels,
            dtype=dtype
        )
        # Wait for recording to complete
        sd.wait()
        print("✅ Recording completed!")
        # Convert to 1D array if mono
        if channels == 1:
            recording = recording.flatten()
        return recording.astype(dtype)
    except KeyboardInterrupt:
        print("\n⏹️ Recording stopped by user.")
        sd.stop()
        if 'recording' in locals():
            # BUG FIX: truncate to the frames captured before the interrupt.
            # The old code sliced with int(time.time() * sample_rate) — the
            # absolute epoch time, not elapsed time — which lies far past the
            # end of the buffer, so the whole (partly silent) buffer was
            # returned and the mono flatten was skipped.
            captured = min(int((time.time() - start_time) * sample_rate),
                           len(recording))
            partial = recording[:captured]
            if channels == 1:
                partial = partial.flatten()
            return partial.astype(dtype)
        else:
            # Return empty array if no recording was captured
            return np.zeros(sample_rate, dtype=dtype)
    except Exception as e:
        print(f"❌ Recording failed: {e}")
        return np.zeros(sample_rate, dtype=dtype)
def create_silence(duration: float = 1.0, sample_rate: int = 24000, dtype=np.int16) -> np.ndarray:
    """Return a zero-filled (silent) audio buffer.

    Args:
        duration: Length of the silence in seconds.
        sample_rate: Samples per second (Hz).
        dtype: NumPy dtype of the returned buffer.

    Returns:
        1-D numpy array containing int(duration * sample_rate) zeros.
    """
    sample_count = int(duration * sample_rate)
    return np.zeros(sample_count, dtype=dtype)
def save_audio(audio_data: np.ndarray, filename: str, sample_rate: int = 24000):
    """
    Save audio data to a WAV file.

    Args:
        audio_data: Audio data as numpy array
        filename: Output filename (should end with .wav)
        sample_rate: Audio sample rate (Hz)
    """
    try:
        import soundfile as sf
        sf.write(filename, audio_data, sample_rate)
        # BUG FIX: the success message printed the literal "(unknown)" instead
        # of interpolating the destination path.
        print(f"✅ Audio saved to {filename}")
    except ImportError:
        print("❌ soundfile package required for saving audio. Install with: pip install soundfile")
    except Exception as e:
        print(f"❌ Failed to save audio: {e}")
def load_audio(filename: str, sample_rate: int = 24000, dtype=np.int16) -> np.ndarray:
    """
    Load audio data from a WAV file.

    Args:
        filename: Input filename
        sample_rate: Target sample rate (resampled via librosa if different)
        dtype: Target data type

    Returns:
        Audio data as numpy array; on any failure (optional packages missing,
        unreadable file, ...) one second of silence is returned instead.
    """
    try:
        import soundfile as sf
        samples, source_rate = sf.read(filename)
        # Bring the audio to the requested rate if the file differs.
        if source_rate != sample_rate:
            import librosa
            samples = librosa.resample(samples, orig_sr=source_rate, target_sr=sample_rate)
        # soundfile yields float data; scale to the int16 range on request.
        if dtype == np.int16:
            samples = (samples * 32767).astype(np.int16)
        return samples
    except ImportError:
        print("❌ soundfile and librosa packages required for loading audio.")
        print("Install with: pip install soundfile librosa")
        return np.zeros(sample_rate, dtype=dtype)
    except Exception as e:
        print(f"❌ Failed to load audio: {e}")
        return np.zeros(sample_rate, dtype=dtype)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/__init__.py | # Static voice agent example
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/agent.py | import asyncio
import random
import numpy as np
from agents import Agent, function_tool
from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
from agents.voice import (
AudioInput,
SingleAgentVoiceWorkflow,
SingleAgentWorkflowCallbacks,
VoicePipeline,
)
from .util import AudioPlayer, record_audio
"""
This is a simple example that uses a recorded audio buffer. Run it via:
`python -m ai_agent_framework_crash_course.openai_sdk_crash_course.11_voice.static.agent`
1. You can record an audio clip in the terminal.
2. The pipeline automatically transcribes the audio.
3. The agent workflow is a simple one that starts at the Assistant agent.
4. The output of the agent is streamed to the audio player.
Try examples like:
- Tell me a joke (will respond with a joke)
- What's the weather in Tokyo? (will call the `get_weather` tool and then speak)
- Hola, como estas? (will handoff to the spanish agent)
"""
@function_tool
def get_weather(city: str) -> str:
    """Get the weather for a given city."""
    print(f"[debug] get_weather called with city: {city}")
    # Demo stub: report a random condition instead of querying a real API.
    condition = random.choice(["sunny", "cloudy", "rainy", "snowy"])
    return f"The weather in {city} is {condition}."
@function_tool
def get_time() -> str:
    """Get the current time."""
    import datetime
    # 12-hour clock with AM/PM, e.g. "03:07 PM".
    now = datetime.datetime.now()
    current_time = now.strftime("%I:%M %p")
    print(f"[debug] get_time called, current time: {current_time}")
    return f"The current time is {current_time}."
@function_tool
def calculate_tip(bill_amount: float, tip_percentage: float = 15.0) -> str:
    """Calculate tip amount for a bill."""
    tip_amount = bill_amount * (tip_percentage / 100)
    total_amount = bill_amount + tip_amount
    print(f"[debug] calculate_tip called with bill: ${bill_amount}, tip: {tip_percentage}%")
    return (
        f"For a bill of ${bill_amount:.2f} with {tip_percentage}% tip, "
        f"the tip is ${tip_amount:.2f} and total is ${total_amount:.2f}."
    )
spanish_agent = Agent(
name="Spanish",
handoff_description="A spanish speaking agent.",
instructions=prompt_with_handoff_instructions(
"You're speaking to a human, so be polite and concise. Speak in Spanish only. "
"Help with weather, time, and calculations as needed."
),
model="gpt-4o-mini",
tools=[get_weather, get_time, calculate_tip]
)
french_agent = Agent(
name="French",
handoff_description="A french speaking agent.",
instructions=prompt_with_handoff_instructions(
"You're speaking to a human, so be polite and concise. Speak in French only. "
"Help with weather, time, and calculations as needed."
),
model="gpt-4o-mini",
tools=[get_weather, get_time, calculate_tip]
)
agent = Agent(
name="Assistant",
instructions=prompt_with_handoff_instructions(
"""You're speaking to a human, so be polite and concise.
You can help with:
- Weather information for any city
- Current time
- Tip calculations
- General conversation and jokes
Language handling:
- If the user speaks in Spanish, handoff to the Spanish agent
- If the user speaks in French, handoff to the French agent
- Otherwise, respond in English
Keep responses conversational and friendly for voice interaction."""
),
model="gpt-4o-mini",
handoffs=[spanish_agent, french_agent],
tools=[get_weather, get_time, calculate_tip],
)
class WorkflowCallbacks(SingleAgentWorkflowCallbacks):
    """Custom callbacks to monitor the voice workflow.

    Every hook only logs to stdout; none of them alters pipeline behavior.
    """
    def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:
        """Called when the workflow runs with a new transcription."""
        print(f"[debug] 🎯 Workflow running with transcription: '{transcription}'")
    def on_tool_call(self, tool_name: str, arguments: dict) -> None:
        """Called when a tool is about to be executed."""
        print(f"[debug] 🔧 Tool call: {tool_name} with args: {arguments}")
    def on_handoff(self, from_agent: str, to_agent: str) -> None:
        """Called when a handoff occurs between agents."""
        print(f"[debug] 🔄 Handoff from {from_agent} to {to_agent}")
async def main():
    """Main function to run the static voice agent example.

    Flow: record a fixed-length clip, feed it through the voice pipeline
    (transcribe -> agent -> TTS), then stream the synthesized reply to the
    speakers while counting audio/lifecycle events for the summary.
    """
    print("🎙️ Static Voice Agent Demo")
    print("=" * 50)
    print()
    # Create the voice pipeline with our agent and callbacks
    pipeline = VoicePipeline(
        workflow=SingleAgentVoiceWorkflow(agent, callbacks=WorkflowCallbacks())
    )
    print("This demo will:")
    print("1. 🎤 Record your voice for a few seconds")
    print("2. 🔄 Transcribe your speech to text")
    print("3. 🤖 Process with AI agent")
    print("4. 🔊 Convert response back to speech")
    print()
    # Record audio input
    try:
        audio_buffer = record_audio(duration=5.0)
        print(f"📊 Recorded {len(audio_buffer)} audio samples")
        # Create audio input for the pipeline
        audio_input = AudioInput(buffer=audio_buffer)
        # Run the voice pipeline
        print("\n🔄 Processing with voice pipeline...")
        result = await pipeline.run(audio_input)
        # Play the result audio
        print("🔊 Playing AI response...")
        with AudioPlayer() as player:
            audio_chunks_received = 0
            lifecycle_events = 0
            async for event in result.stream():
                if event.type == "voice_stream_event_audio":
                    player.add_audio(event.data)
                    audio_chunks_received += 1
                    if audio_chunks_received % 10 == 0:  # Progress indicator
                        print(f"🎵 Received {audio_chunks_received} audio chunks...")
                elif event.type == "voice_stream_event_lifecycle":
                    lifecycle_events += 1
                    print(f"📋 Lifecycle event: {event.event}")
                elif event.type == "voice_stream_event_error":
                    print(f"❌ Error event: {event.error}")
            # Add 1 second of silence to ensure the audio finishes playing
            # (24000 samples at the pipeline's 24 kHz output rate).
            print("🔇 Adding silence buffer...")
            player.add_audio(np.zeros(24000 * 1, dtype=np.int16))
        print(f"\n✅ Voice interaction complete!")
        print(f"📊 Statistics:")
        print(f"   - Audio chunks played: {audio_chunks_received}")
        print(f"   - Lifecycle events: {lifecycle_events}")
    except KeyboardInterrupt:
        print("\n⏹️ Demo interrupted by user.")
    except Exception as e:
        print(f"\n❌ Demo failed: {e}")
        import traceback
        traceback.print_exc()
def demo_with_examples():
    """Print a numbered menu of sample utterances the user can try."""
    examples = (
        "Tell me a joke",
        "What's the weather in New York?",
        "What time is it?",
        "Calculate a 20% tip on a $50 bill",
        "Hola, como estas?",  # Spanish handoff
        "Bonjour, comment allez-vous?"  # French handoff
    )
    print("🎭 Demo Examples:")
    for index, example in enumerate(examples, start=1):
        print(f"{index}. {example}")
    print()
    print("You can try saying any of these examples when recording!")
# Script entry point: show the sample prompts, then run the async demo once.
if __name__ == "__main__":
    print("🚀 OpenAI Agents SDK - Static Voice Demo")
    print("=" * 60)
    # Show example prompts
    demo_with_examples()
    # Run the main demo
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/util.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/util.py | import threading
import time
from typing import Optional
import numpy as np
import sounddevice as sd
class AudioPlayer:
    """A simple audio player using sounddevice for real-time audio playback.

    Use as a context manager: the output stream is opened on __enter__ and
    closed on __exit__; add_audio() writes PCM chunks directly to the stream.
    """
    def __init__(self, sample_rate: int = 24000, channels: int = 1, dtype=np.int16):
        self.sample_rate = sample_rate
        self.channels = channels
        self.dtype = dtype
        self.stream: Optional[sd.OutputStream] = None
        # Once set via stop(), add_audio() becomes a no-op.
        self._stop_event = threading.Event()
    def __enter__(self):
        """Context manager entry - start the audio stream."""
        self.stream = sd.OutputStream(
            samplerate=self.sample_rate,
            channels=self.channels,
            dtype=self.dtype
        )
        self.stream.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - stop and close the audio stream."""
        if self.stream:
            self.stream.stop()
            self.stream.close()
    def add_audio(self, audio_data: np.ndarray):
        """Add audio data to be played immediately."""
        if self.stream and not self._stop_event.is_set():
            try:
                self.stream.write(audio_data)
            except Exception as e:
                # Best-effort playback: log and keep the stream usable.
                print(f"[error] Failed to play audio: {e}")
    def stop(self):
        """Stop the audio player."""
        self._stop_event.set()
class StreamedAudioRecorder:
    """A streaming audio recorder that captures audio in real-time.

    Chunks arrive on sounddevice's callback thread and are buffered in a
    lock-protected FIFO; consumers drain them via get_audio_chunk().
    """
    def __init__(self, sample_rate: int = 24000, channels: int = 1, dtype=np.int16, chunk_size: int = 1024):
        from collections import deque  # local import: keeps module-level deps unchanged
        self.sample_rate = sample_rate
        self.channels = channels
        self.dtype = dtype
        self.chunk_size = chunk_size
        self.stream: Optional[sd.InputStream] = None
        # deque gives O(1) popleft; the previous list.pop(0) shifted the
        # whole queue on every consumed chunk.
        self._audio_queue = deque()
        self._stop_event = threading.Event()
        self._lock = threading.Lock()
    def __enter__(self):
        """Context manager entry - start the audio stream."""
        self.stream = sd.InputStream(
            samplerate=self.sample_rate,
            channels=self.channels,
            dtype=self.dtype,
            blocksize=self.chunk_size,
            callback=self._audio_callback
        )
        self.stream.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - stop and close the audio stream."""
        self._stop_event.set()
        if self.stream:
            self.stream.stop()
            self.stream.close()
    def _audio_callback(self, indata, frames, time, status):
        """Callback function for audio input stream (runs on the audio thread)."""
        if status:
            print(f"[warning] Audio input status: {status}")
        with self._lock:
            # Copy out of the driver-owned buffer, flatten, and enqueue.
            audio_chunk = indata.copy().flatten().astype(self.dtype)
            self._audio_queue.append(audio_chunk)
    def get_audio_chunk(self) -> Optional[np.ndarray]:
        """Get the next available audio chunk, or None if the queue is empty."""
        with self._lock:
            if self._audio_queue:
                return self._audio_queue.popleft()
        return None
    def has_audio(self) -> bool:
        """Check if there's audio data available."""
        with self._lock:
            return len(self._audio_queue) > 0
    def stop(self):
        """Stop the recorder."""
        self._stop_event.set()
def record_audio(
    duration: float = 5.0,
    sample_rate: int = 24000,
    channels: int = 1,
    dtype=np.int16
) -> np.ndarray:
    """
    Record audio from the microphone for a specified duration.
    Args:
        duration: Recording duration in seconds
        sample_rate: Audio sample rate (Hz)
        channels: Number of audio channels
        dtype: Audio data type
    Returns:
        Recorded audio as numpy array (1-D when channels == 1)
    """
    print(f"🎤 Recording audio for {duration} seconds... Press Ctrl+C to stop early.")
    print("Say something now!")
    start_time = time.time()  # wall-clock start, used to trim on early stop
    try:
        # Record audio (sd.rec returns immediately; sd.wait blocks until done)
        recording = sd.rec(
            int(duration * sample_rate),
            samplerate=sample_rate,
            channels=channels,
            dtype=dtype
        )
        sd.wait()
        print("✅ Recording completed!")
        # Convert to 1D array if mono
        if channels == 1:
            recording = recording.flatten()
        return recording.astype(dtype)
    except KeyboardInterrupt:
        print("\n⏹️ Recording stopped by user.")
        sd.stop()
        if 'recording' in locals():
            # Keep only the frames captured before the interrupt. The original
            # sliced with time.time() — absolute epoch seconds — which always
            # exceeded the buffer length, so nothing was ever trimmed.
            captured = int(min(time.time() - start_time, duration) * sample_rate)
            if channels == 1:
                recording = recording.flatten()
            return recording[:captured].astype(dtype)
        # Return one second of silence if no recording was captured
        return np.zeros(sample_rate, dtype=dtype)
    except Exception as e:
        print(f"❌ Recording failed: {e}")
        return np.zeros(sample_rate, dtype=dtype)
def create_silence(duration: float = 1.0, sample_rate: int = 24000, dtype=np.int16) -> np.ndarray:
    """
    Create a buffer of silence for the specified duration.
    Args:
        duration: Duration of silence in seconds
        sample_rate: Audio sample rate (Hz)
        dtype: Audio data type
    Returns:
        Silence buffer as numpy array
    """
    sample_count = int(duration * sample_rate)
    return np.zeros(sample_count, dtype=dtype)
def save_audio(audio_data: np.ndarray, filename: str, sample_rate: int = 24000):
    """
    Save audio data to a WAV file.
    Args:
        audio_data: Audio data as numpy array
        filename: Output filename (should end with .wav)
        sample_rate: Audio sample rate (Hz)
    """
    try:
        import soundfile as sf
        sf.write(filename, audio_data, sample_rate)
        # Fixed: the success message printed a literal placeholder instead
        # of the destination path.
        print(f"✅ Audio saved to {filename}")
    except ImportError:
        print("❌ soundfile package required for saving audio. Install with: pip install soundfile")
    except Exception as e:
        print(f"❌ Failed to save audio: {e}")
def load_audio(filename: str, sample_rate: int = 24000, dtype=np.int16) -> np.ndarray:
    """
    Load audio data from a WAV file.
    Args:
        filename: Input filename
        sample_rate: Target sample rate (will resample if different)
        dtype: Target data type
    Returns:
        Audio data as numpy array (zeros of one second on failure)
    """
    try:
        import soundfile as sf
        audio_data, original_sr = sf.read(filename)
        # Resample if necessary
        if original_sr != sample_rate:
            import librosa
            audio_data = librosa.resample(audio_data, orig_sr=original_sr, target_sr=sample_rate)
        # sf.read yields normalized floats by default; rescale to the full
        # range for any integer target (the original handled only int16 and
        # returned an unconverted float array for every other dtype).
        if np.issubdtype(np.dtype(dtype), np.integer):
            audio_data = (audio_data * np.iinfo(dtype).max).astype(dtype)
        else:
            audio_data = audio_data.astype(dtype)
        return audio_data
    except ImportError:
        print("❌ soundfile and librosa packages required for loading audio.")
        print("Install with: pip install soundfile librosa")
        return np.zeros(sample_rate, dtype=dtype)
    except Exception as e:
        print(f"❌ Failed to load audio: {e}")
        return np.zeros(sample_rate, dtype=dtype)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/__init__.py | # Streaming voice agent example
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/agent.py | import asyncio
import random
import threading
import time
import numpy as np
from agents import Agent, function_tool
from agents.extensions.handoff_prompt import prompt_with_handoff_instructions
from agents.voice import (
StreamedAudioInput,
SingleAgentVoiceWorkflow,
SingleAgentWorkflowCallbacks,
VoicePipeline,
)
from .util import AudioPlayer, StreamedAudioRecorder, create_silence
"""
This is a streaming voice example that processes audio in real-time. Run it via:
`python -m ai_agent_framework_crash_course.openai_sdk_crash_course.11_voice.streamed.agent`
1. The pipeline continuously listens for audio input.
2. It automatically detects when you start and stop speaking.
3. The agent workflow processes speech in real-time.
4. The output is streamed back to you as audio.
This example demonstrates:
- Real-time speech detection and processing
- Streaming audio input and output
- Activity detection for turn-based conversation
- Interruption handling and turn management
Try examples like:
- Start speaking and the agent will respond when you finish
- Try multiple turns of conversation
- Test language handoffs with Spanish or French
"""
# --- Mock function tools shared by the streaming agents below ---
@function_tool
def get_weather(city: str) -> str:
    """Get the weather for a given city."""
    print(f"[debug] get_weather called with city: {city}")
    # Demo stub: picks a random condition rather than calling a real API.
    choices = ["sunny", "cloudy", "rainy", "snowy"]
    return f"The weather in {city} is {random.choice(choices)}."
@function_tool
def get_time() -> str:
    """Get the current time."""
    import datetime
    current_time = datetime.datetime.now().strftime("%I:%M %p")
    print(f"[debug] get_time called, current time: {current_time}")
    return f"The current time is {current_time}."
@function_tool
def set_reminder(message: str, minutes: int = 5) -> str:
    """Set a simple reminder (demo function)."""
    print(f"[debug] set_reminder called: '{message}' in {minutes} minutes")
    # No scheduling actually happens; the reply string says so explicitly.
    return f"Reminder set: '{message}' in {minutes} minutes. (This is a demo - no actual reminder will be triggered)"
@function_tool
def get_news_summary() -> str:
    """Get a brief news summary (demo function)."""
    print("[debug] get_news_summary called")
    # Mock news items
    news_items = [
        "Technology stocks continue to rise amid AI developments",
        "Climate change summit reaches new international agreements",
        "Space exploration mission launches successfully",
        "New renewable energy projects announced globally"
    ]
    selected_news = random.choice(news_items)
    return f"Here's a news update: {selected_news}. This is a demo news summary."
spanish_agent = Agent(
name="Spanish",
handoff_description="A spanish speaking agent.",
instructions=prompt_with_handoff_instructions(
"You're speaking to a human in real-time, so be polite and concise. Speak in Spanish only. "
"Help with weather, time, reminders, and news as needed. Keep responses brief for voice interaction."
),
model="gpt-4o-mini",
tools=[get_weather, get_time, set_reminder, get_news_summary]
)
french_agent = Agent(
name="French",
handoff_description="A french speaking agent.",
instructions=prompt_with_handoff_instructions(
"You're speaking to a human in real-time, so be polite and concise. Speak in French only. "
"Help with weather, time, reminders, and news as needed. Keep responses brief for voice interaction."
),
model="gpt-4o-mini",
tools=[get_weather, get_time, set_reminder, get_news_summary]
)
agent = Agent(
name="Assistant",
instructions=prompt_with_handoff_instructions(
"""You're speaking to a human in real-time voice conversation, so be polite and concise.
You can help with:
- Weather information for any city
- Current time
- Setting reminders (demo)
- News summaries (demo)
- General conversation
Language handling:
- If the user speaks in Spanish, handoff to the Spanish agent
- If the user speaks in French, handoff to the French agent
- Otherwise, respond in English
Keep responses brief and conversational since this is a voice interface.
Acknowledge when users switch topics or ask follow-up questions."""
),
model="gpt-4o-mini",
handoffs=[spanish_agent, french_agent],
tools=[get_weather, get_time, set_reminder, get_news_summary],
)
class StreamingWorkflowCallbacks(SingleAgentWorkflowCallbacks):
    """Custom callbacks to monitor the streaming voice workflow.

    Pure observability: counts turns and timestamps the session for the
    debug log lines; no hook changes workflow behavior.
    """
    def __init__(self):
        # Completed-turn counter and session start time, both log-only.
        self.turn_count = 0
        self.start_time = time.time()
    def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:
        """Called when the workflow runs with a new transcription."""
        self.turn_count += 1
        print(f"\n[debug] 🎯 Turn {self.turn_count} - Transcription: '{transcription}'")
    def on_tool_call(self, tool_name: str, arguments: dict) -> None:
        """Called when a tool is about to be executed."""
        print(f"[debug] 🔧 Tool call: {tool_name} with args: {arguments}")
    def on_handoff(self, from_agent: str, to_agent: str) -> None:
        """Called when a handoff occurs between agents."""
        print(f"[debug] 🔄 Handoff from {from_agent} to {to_agent}")
    def on_turn_start(self) -> None:
        """Called when a new turn starts."""
        elapsed = time.time() - self.start_time
        print(f"[debug] ▶️ Turn started (session time: {elapsed:.1f}s)")
    def on_turn_end(self) -> None:
        """Called when a turn ends."""
        print(f"[debug] ⏹️ Turn ended")
class VoiceSessionManager:
    """Manages the voice session state and audio streams.

    Owns the pipeline, the speaker-output player, and the stop flag shared
    by the concurrent input/output tasks; one instance == one session.
    """
    def __init__(self):
        self.is_running = False
        self.audio_player = None
        self.pipeline = None
        self.callbacks = StreamingWorkflowCallbacks()
        self._stop_event = threading.Event()
    async def start_session(self):
        """Start the voice session."""
        self.is_running = True
        self._stop_event.clear()
        # Create the voice pipeline
        self.pipeline = VoicePipeline(
            workflow=SingleAgentVoiceWorkflow(agent, callbacks=self.callbacks)
        )
        print("🎙️ Voice session started. Start speaking...")
        print("💡 Tips:")
        print("   - Speak clearly and pause between sentences")
        print("   - Try asking about weather, time, or setting reminders")
        print("   - Say something in Spanish or French to test language handoffs")
        print("   - Press Ctrl+C to end the session")
        print()
        # Start audio recording and processing
        await self._run_streaming_session()
    async def _run_streaming_session(self):
        """Run the main streaming session loop.

        Mic input and speaker output run as two concurrent asyncio tasks
        bridged by the pipeline's StreamedAudioInput / result stream.
        """
        with StreamedAudioRecorder() as recorder:
            with AudioPlayer() as player:
                self.audio_player = player
                # Create streamed audio input
                streamed_input = StreamedAudioInput()
                # Start the pipeline processing
                result = await self.pipeline.run(streamed_input)
                # Create tasks for audio input and output processing
                input_task = asyncio.create_task(self._process_audio_input(recorder, streamed_input))
                output_task = asyncio.create_task(self._process_audio_output(result))
                try:
                    # Run both tasks concurrently
                    await asyncio.gather(input_task, output_task)
                except asyncio.CancelledError:
                    print("\n🛑 Session cancelled")
                finally:
                    # Cleanup
                    streamed_input.finish()
                    self.is_running = False
    async def _process_audio_input(self, recorder: StreamedAudioRecorder, streamed_input: StreamedAudioInput):
        """Process incoming audio from the microphone."""
        print("🎤 Listening for audio input...")
        while self.is_running and not self._stop_event.is_set():
            if recorder.has_audio():
                audio_chunk = recorder.get_audio_chunk()
                if audio_chunk is not None:
                    # Push audio to the pipeline
                    streamed_input.push_audio(audio_chunk)
            # Small delay to prevent busy waiting
            await asyncio.sleep(0.01)
        print("⏹️ Audio input processing stopped")
    async def _process_audio_output(self, result):
        """Process outgoing audio to the speakers."""
        print("🔊 Ready to play audio responses...")
        audio_chunks_count = 0
        async for event in result.stream():
            if self._stop_event.is_set():
                break
            if event.type == "voice_stream_event_audio":
                if self.audio_player:
                    self.audio_player.add_audio(event.data)
                    audio_chunks_count += 1
                    # Progress indicator for long responses
                    if audio_chunks_count % 20 == 0:
                        print(f"🎵 Playing response... ({audio_chunks_count} chunks)")
            elif event.type == "voice_stream_event_lifecycle":
                if event.event == "turn_started":
                    print("🔄 AI is processing your speech...")
                elif event.event == "turn_ended":
                    print("✅ AI response complete. You can speak again.")
                    # Add a small silence buffer between turns
                    if self.audio_player:
                        self.audio_player.add_audio(create_silence(0.5))
            elif event.type == "voice_stream_event_error":
                print(f"❌ Voice error: {event.error}")
        print("⏹️ Audio output processing stopped")
    def stop_session(self):
        """Stop the voice session."""
        self.is_running = False
        self._stop_event.set()
        print("\n🛑 Stopping voice session...")
async def main():
    """Main function to run the streamed voice agent example."""
    print("🎙️ Streaming Voice Agent Demo")
    print("=" * 50)
    print()
    session_manager = VoiceSessionManager()
    try:
        await session_manager.start_session()
    except KeyboardInterrupt:
        # Ctrl+C is the expected way to end the demo.
        print("\n⏹️ Demo interrupted by user.")
        session_manager.stop_session()
    except Exception as e:
        print(f"\n❌ Demo failed: {e}")
        import traceback
        traceback.print_exc()
    finally:
        print("\n👋 Voice session ended. Thanks for trying the demo!")
def show_streaming_features():
    """Print an overview of the streaming voice features and sample commands."""
    banner = [
        "🌊 Streaming Voice Features:",
        "=" * 40,
        "",
        "✨ Real-time Features:",
        "   • Continuous audio input processing",
        "   • Automatic speech activity detection",
        "   • Real-time agent response streaming",
        "   • Turn-based conversation management",
        "",
        "🔧 Advanced Capabilities:",
        "   • Multi-language support with agent handoffs",
        "   • Tool calling during voice conversation",
        "   • Streaming callbacks for monitoring",
        "   • Interruption handling (via lifecycle events)",
        "",
        "🎯 Try These Commands:",
        "   • 'What's the weather in Paris?'",
        "   • 'What time is it?'",
        "   • 'Set a reminder to call mom in 10 minutes'",
        "   • 'Give me a news summary'",
        "   • 'Hola, ¿cómo estás?' (Spanish)",
        "   • 'Bonjour, comment ça va?' (French)",
        "",
    ]
    for line in banner:
        print(line)
# Script entry point: print the feature overview, then run the async demo.
if __name__ == "__main__":
    print("🚀 OpenAI Agents SDK - Streaming Voice Demo")
    print("=" * 60)
    # Show streaming features
    show_streaming_features()
    # Run the main demo
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/realtime/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/realtime/__init__.py | # Realtime voice agent example
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/realtime/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/realtime/agent.py | import asyncio
from agents import function_tool
from agents.realtime import RealtimeAgent, RealtimeRunner, realtime_handoff
"""
Basic realtime voice agent example using OpenAI's Realtime API.
Run it via: python agent.py
This demonstrates the core realtime components from the official guide:
https://openai.github.io/openai-agents-python/realtime/guide/
Core Components:
1. RealtimeAgent - Agent with instructions, tools, and handoffs
2. RealtimeRunner - Manages configuration and sessions
3. RealtimeSession - Single conversation session
4. Event handling - Process audio, transcripts, and tool calls
"""
# Basic function tools: demo stubs with hard-coded replies, no real backends.
@function_tool
def get_weather(city: str) -> str:
    """Get current weather for a city."""
    print(f"[debug] get_weather called with city: {city}")
    # Canned reading — no weather API is queried.
    return f"The weather in {city} is sunny, 72°F"
@function_tool
def book_appointment(date: str, time: str, service: str) -> str:
    """Book an appointment."""
    print(f"[debug] book_appointment called: {service} on {date} at {time}")
    # Confirms without persisting anything.
    return f"Appointment booked for {service} on {date} at {time}"
# Specialized agent for handoffs
billing_agent = RealtimeAgent(
    name="Billing Support",
    instructions="You specialize in billing and payment issues.",
)
# Main realtime agent
agent = RealtimeAgent(
    name="Assistant",
    instructions="You are a helpful voice assistant. Keep responses brief and conversational.",
    tools=[get_weather, book_appointment],
    handoffs=[
        # realtime_handoff exposes the billing agent as a transfer target.
        realtime_handoff(billing_agent, tool_description="Transfer to billing support")
    ]
)
async def main():
    """Basic realtime session example.

    Configures a RealtimeRunner, opens one session, and echoes key events
    (user/assistant transcripts, tool calls, errors) until Ctrl+C.
    """
    print("🎙️ Basic Realtime Voice Agent")
    print("=" * 40)
    # Set up the runner with basic configuration
    runner = RealtimeRunner(
        starting_agent=agent,
        config={
            "model_settings": {
                "model_name": "gpt-4o-realtime-preview",
                "voice": "alloy",
                "modalities": ["text", "audio"],
                "input_audio_transcription": {
                    "model": "whisper-1"
                },
                # Server-side voice activity detection ends a turn after
                # 200 ms of silence at threshold 0.5.
                "turn_detection": {
                    "type": "server_vad",
                    "threshold": 0.5,
                    "silence_duration_ms": 200
                }
            }
        }
    )
    # Start the session
    print("Starting realtime session...")
    session = await runner.run()
    print("Session started! Speak naturally - agent will respond in real-time.")
    print("Try: 'What's the weather in Paris?' or 'Book appointment tomorrow at 2pm'")
    print("Press Ctrl+C to end")
    print("-" * 40)
    # Handle session events
    async with session:
        try:
            async for event in session:
                # Handle key event types
                if event.type == "response.audio_transcript.done":
                    print(f"🤖 Assistant: {event.transcript}")
                elif event.type == "conversation.item.input_audio_transcription.completed":
                    print(f"👤 User: {event.transcript}")
                elif event.type == "response.function_call_arguments.done":
                    print(f"🔧 Tool called: {event.name}")
                elif event.type == "error":
                    print(f"❌ Error: {event.error}")
                    break
        except KeyboardInterrupt:
            print("\n⏹️ Session ended")
# Script entry point: run the realtime session demo once.
if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/calculator_agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/calculator_agent.py | """
OpenAI Agents SDK Tutorial 3: Tool Using Agent - Calculator
This module demonstrates how to create custom function tools for mathematical operations.
"""
import os
import math
from dotenv import load_dotenv
from agents import Agent, Runner, function_tool
# Load environment variables
load_dotenv()
@function_tool
def add_numbers(a: float, b: float) -> float:
    """Add two numbers together."""
    return a + b


@function_tool
def subtract_numbers(a: float, b: float) -> float:
    """Subtract second number from first number."""
    return a - b


@function_tool
def multiply_numbers(a: float, b: float) -> float:
    """Multiply two numbers together."""
    return a * b


@function_tool
def divide_numbers(a: float, b: float) -> float | str:
    """Divide first number by second number.

    Returns the quotient, or an error string when dividing by zero.
    The annotation now reflects both outcomes — the original declared
    ``-> float`` while returning a str on the zero-divisor path.
    """
    if b == 0:
        return "Error: Cannot divide by zero"
    return a / b
@function_tool
def calculate_compound_interest(principal: float, rate: float, time: int, compounds_per_year: int = 1) -> str:
    """Calculate compound interest using the formula A = P(1 + r/n)^(nt)"""
    if principal <= 0 or rate < 0 or time <= 0 or compounds_per_year <= 0:
        return "Error: All values must be positive"
    # Convert percentage to decimal if needed
    # NOTE(review): heuristic — a genuine decimal rate above 1.0 (>100%)
    # would be wrongly divided by 100; acceptable for this demo tool.
    if rate > 1:
        rate = rate / 100
    amount = principal * (1 + rate/compounds_per_year) ** (compounds_per_year * time)
    interest = amount - principal
    return f"Principal: ${principal:,.2f}, Final Amount: ${amount:,.2f}, Interest Earned: ${interest:,.2f}"
@function_tool
def calculate_circle_area(radius: float) -> str:
    """Compute the area of a circle from its radius (rejects non-positive input)."""
    if radius <= 0:
        return "Error: Radius must be positive"
    area = math.pi * radius * radius
    return f"Circle with radius {radius} has area {area:.2f} square units"


@function_tool
def calculate_triangle_area(base: float, height: float) -> str:
    """Compute the area of a triangle from base and height (rejects non-positive input)."""
    if base <= 0 or height <= 0:
        return "Error: Base and height must be positive"
    area = base * height / 2
    return f"Triangle with base {base} and height {height} has area {area:.2f} square units"
@function_tool
def convert_temperature(temperature: float, from_unit: str, to_unit: str) -> str:
    """Convert a temperature reading between Celsius, Fahrenheit, and Kelvin."""
    from_unit = from_unit.lower()
    to_unit = to_unit.lower()
    unsupported = "Error: Supported units are Celsius, Fahrenheit, and Kelvin"
    # Step 1: normalize the source reading to Celsius.
    if from_unit in ("fahrenheit", "f"):
        celsius = (temperature - 32) * 5 / 9
    elif from_unit in ("kelvin", "k"):
        celsius = temperature - 273.15
    elif from_unit in ("celsius", "c"):
        celsius = temperature
    else:
        return unsupported
    # Step 2: project the Celsius value onto the requested unit.
    if to_unit in ("fahrenheit", "f"):
        result, unit_symbol = celsius * 9 / 5 + 32, "°F"
    elif to_unit in ("kelvin", "k"):
        result, unit_symbol = celsius + 273.15, "K"
    elif to_unit in ("celsius", "c"):
        result, unit_symbol = celsius, "°C"
    else:
        return unsupported
    return f"{temperature}° {from_unit.title()} = {result:.2f}{unit_symbol}"
# Create the calculator agent
calculator_agent = Agent(
name="Calculator Agent",
instructions="""
You are a mathematical calculator assistant with access to various calculation tools.
You can help with:
- Basic arithmetic (addition, subtraction, multiplication, division)
- Compound interest calculations
- Geometric calculations (circle and triangle areas)
- Temperature conversions between Celsius, Fahrenheit, and Kelvin
When users ask for calculations:
1. Use the appropriate tool for the calculation
2. Explain what calculation you're performing
3. Show the result clearly
4. Provide additional context if helpful
Always use the provided tools rather than doing calculations yourself.
Be helpful and explain your calculations step by step.
""",
tools=[
add_numbers,
subtract_numbers,
multiply_numbers,
divide_numbers,
calculate_compound_interest,
calculate_circle_area,
calculate_triangle_area,
convert_temperature
]
)
def demonstrate_calculator():
    """Demonstrate the calculator agent with various examples.

    Runs each canned question through the agent synchronously and prints
    the result; failures are reported per-question without aborting the run.
    """
    print("🎯 OpenAI Agents SDK - Tutorial 3: Calculator Agent")
    print("=" * 60)
    print()
    # Test cases
    test_cases = [
        "Calculate 15 + 27",
        "What's the compound interest on $5000 at 3.5% for 8 years?",
        "Find the area of a circle with radius 12",
        "Convert 100 degrees Fahrenheit to Celsius",
        "What's 144 divided by 12?",
        "Calculate the area of a triangle with base 8 and height 6"
    ]
    for i, question in enumerate(test_cases, 1):
        print(f"=== Calculation {i} ===")
        print(f"Question: {question}")
        try:
            # run_sync blocks until the agent (and any tool calls) finish.
            result = Runner.run_sync(calculator_agent, question)
            print(f"Answer: {result.final_output}")
        except Exception as e:
            print(f"❌ Error: {e}")
        print()
        print("-" * 40)
        print()
def interactive_mode():
    """Interactive calculator REPL.

    Prompts the user in a loop, forwards each question to the calculator
    agent, and exits on 'quit'/'exit'/'bye', Ctrl+C, or end-of-input.
    """
    print("=== Interactive Calculator ===")
    print("Ask me to perform any mathematical calculation!")
    print("Type 'quit' to exit.")
    print()
    while True:
        try:
            question = input("Math Question: ").strip()
        except (EOFError, KeyboardInterrupt):
            # A closed/piped stdin or Ctrl+C previously crashed the loop
            # with an uncaught exception; treat both as a normal exit.
            print("\nGoodbye!")
            break
        if question.lower() in ['quit', 'exit', 'bye']:
            print("Goodbye!")
            break
        if not question:
            continue
        try:
            result = Runner.run_sync(calculator_agent, question)
            print(f"📊 Answer: {result.final_output}")
            print()
        except Exception as e:
            print(f"❌ Error: {e}")
            print()
def main():
    """Main function: validate configuration, then run demo and REPL."""
    # Check API key before doing any work that would hit the API.
    if not os.getenv("OPENAI_API_KEY"):
        print("❌ Error: OPENAI_API_KEY not found in environment variables")
        print("Please create a .env file with your OpenAI API key")
        return
    try:
        # Run demonstrations
        demonstrate_calculator()
        # Interactive mode
        interactive_mode()
    except Exception as e:
        print(f"❌ Error: {e}")
# Script entry point.
if __name__ == "__main__":
    main()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_1_function_tools/tools.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_1_function_tools/tools.py | from agents import function_tool
# Function tools for the tutorial agent; all values are mocked/simple math.
@function_tool
def add_numbers(a: float, b: float) -> float:
    """Add two numbers together"""
    return a + b
@function_tool
def multiply_numbers(a: float, b: float) -> float:
    """Multiply two numbers together"""
    return a * b
@function_tool
def get_weather(city: str) -> str:
    """Get weather information for a city (mock implementation)"""
    return f"The weather in {city} is sunny with 72°F"
@function_tool
def convert_temperature(temperature: float, from_unit: str, to_unit: str) -> str:
    """Convert temperature between Celsius and Fahrenheit"""
    # Only the two cross conversions are handled; anything else — including
    # same-unit requests like celsius -> celsius — falls to the error below.
    if from_unit.lower() == "celsius" and to_unit.lower() == "fahrenheit":
        result = (temperature * 9/5) + 32
        return f"{temperature}°C = {result:.1f}°F"
    elif from_unit.lower() == "fahrenheit" and to_unit.lower() == "celsius":
        result = (temperature - 32) * 5/9
        return f"{temperature}°F = {result:.1f}°C"
    else:
        return "Unsupported temperature conversion"
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_1_function_tools/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_1_function_tools/__init__.py | # Function Tools Agent Package
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_1_function_tools/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_1_function_tools/agent.py | from agents import Agent
from .tools import add_numbers, multiply_numbers, get_weather, convert_temperature
# Create an agent with custom function tools
# The tool list is named so it can be inspected/extended in one place.
FUNCTION_TOOLS = [add_numbers, multiply_numbers, get_weather, convert_temperature]

root_agent = Agent(
    name="Function Tools Agent",
    instructions="""
    You are a helpful assistant with access to various tools.
    Available tools:
    - add_numbers: Add two numbers together
    - multiply_numbers: Multiply two numbers together
    - get_weather: Get weather information for a city
    - convert_temperature: Convert between Celsius and Fahrenheit
    When users ask for calculations or information:
    1. Use the appropriate tool for the task
    2. Explain what you're doing
    3. Show the result clearly
    Always use the provided tools rather than doing calculations yourself.
    """,
    tools=FUNCTION_TOOLS,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/advanced_agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/advanced_agent.py | from agents import Agent, Runner, function_tool
# Define the specialized sub-agents that are later wrapped as tools.
# Instructions are kept as named constants so the agent definitions read
# as one-liners.
_RESEARCH_INSTRUCTIONS = """
    You are a research specialist. Provide detailed, well-researched information
    on any topic with proper analysis and insights.
    """

_WRITING_INSTRUCTIONS = """
    You are a professional writer. Take research information and create
    well-structured, engaging content with proper formatting.
    """

research_agent = Agent(name="Research Specialist", instructions=_RESEARCH_INSTRUCTIONS)

writing_agent = Agent(name="Writing Specialist", instructions=_WRITING_INSTRUCTIONS)
@function_tool
async def run_research_agent(topic: str) -> str:
    """Research a topic using the specialized research agent with custom configuration"""
    prompt = f"Research this topic thoroughly: {topic}"
    # max_turns caps how many reasoning/tool turns the sub-agent may take.
    run_result = await Runner.run(research_agent, input=prompt, max_turns=3)
    return str(run_result.final_output)
@function_tool
async def run_writing_agent(content: str, style: str = "professional") -> str:
    """Transform content using the specialized writing agent with custom style"""
    run_result = await Runner.run(
        writing_agent,
        input=f"Rewrite this content in a {style} style: {content}",
        max_turns=2,  # rewriting needs fewer turns than research
    )
    return str(run_result.final_output)
# Create orchestrator with custom agent tools
# Both function-tool wrappers defined above are handed to the orchestrator.
_CONTENT_TOOLS = [run_research_agent, run_writing_agent]

advanced_orchestrator = Agent(
    name="Content Creation Orchestrator",
    instructions="""
    You are a content creation orchestrator that combines research and writing expertise.
    You have access to:
    - Research agent: For in-depth topic research
    - Writing agent: For professional content creation
    When users request content:
    1. First use the research agent to gather information
    2. Then use the writing agent to create polished content
    3. You can specify writing styles (professional, casual, academic, etc.)
    Coordinate both agents to create comprehensive, well-written content.
    """,
    tools=_CONTENT_TOOLS,
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/__init__.py | # Agents as Tools Package
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_3_agents_as_tools/agent.py | from agents import Agent
# Define specialized translation agents
def _make_translation_agent(language: str) -> Agent:
    """Build a minimal agent whose sole job is translating into *language*."""
    return Agent(
        name=f"{language} Agent",
        instructions=f"You translate the user's message to {language}",
    )

spanish_agent = _make_translation_agent("Spanish")
french_agent = _make_translation_agent("French")
german_agent = _make_translation_agent("German")
# Create orchestrator agent that uses other agents as tools
# (agent, language) pairs drive the tool construction below.
_TRANSLATORS = [
    (spanish_agent, "Spanish"),
    (french_agent, "French"),
    (german_agent, "German"),
]

root_agent = Agent(
    name="Translation Orchestrator",
    instructions="""
    You are a translation orchestrator agent. You coordinate specialized translation agents.
    You have access to translation agents for:
    - Spanish translations
    - French translations
    - German translations
    When users request translations:
    1. Use the appropriate translation agent tool
    2. You can use multiple agents if asked for multiple translations
    3. Present the results clearly with language labels
    If asked for multiple translations, call the relevant tools for each language.
    """,
    tools=[
        agent.as_tool(
            tool_name=f"translate_to_{language.lower()}",
            tool_description=f"Translate the user's message to {language}",
        )
        for agent, language in _TRANSLATORS
    ],
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_2_builtin_tools/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_2_builtin_tools/__init__.py | # Built-in Tools Agent Package
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_2_builtin_tools/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/3_2_builtin_tools/agent.py | from agents import Agent
from agents.tools import WebSearchTool, CodeInterpreterTool
# Create an agent with built-in OpenAI tools
root_agent = Agent(
name="Built-in Tools Agent",
instructions="""
You are a research and computation assistant with access to powerful built-in tools.
Available tools:
- WebSearchTool: Search the web for current information
- CodeInterpreterTool: Execute Python code safely
You can help with:
- Finding current information and news
- Performing complex calculations
- Data analysis and visualization
- Mathematical computations
When users request information or calculations:
1. Use web search for current information
2. Use code execution for computations and analysis
3. Provide clear explanations of results
""",
tools=[WebSearchTool(), CodeInterpreterTool()]
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/streamlit_sessions_app.py | ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/streamlit_sessions_app.py | import streamlit as st
import asyncio
import os
from datetime import datetime
from agents import Agent, Runner, SQLiteSession
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Page configuration
st.set_page_config(
page_title="Session Management Demo",
page_icon="💬",
layout="wide",
initial_sidebar_state="expanded"
)
# Initialize agents
@st.cache_resource
def initialize_agents():
    """Initialize AI agents for different use cases.

    Returns:
        Tuple of (main_agent, support_agent, sales_agent).
    """
    # cache_resource builds the agents once per Streamlit process and shares
    # them across reruns and browser sessions.
    main_agent = Agent(
        name="Session Demo Assistant",
        instructions="""
        You are a helpful assistant demonstrating session memory capabilities.
        Remember previous conversation context and reference it when relevant.
        Reply concisely but show that you remember previous interactions.
        Be friendly and professional.
        """
    )
    support_agent = Agent(
        name="Support Agent",
        instructions="You are a customer support representative. Help with account and technical issues. Be helpful and solution-oriented."
    )
    sales_agent = Agent(
        name="Sales Agent",
        instructions="You are a sales representative. Help with product information and purchases. Be enthusiastic and informative."
    )
    return main_agent, support_agent, sales_agent
# Session management functions
class SessionManager:
    """In-process registry of SQLiteSession objects keyed by session id.

    NOTE(review): the cache lives only for the current process. After an app
    restart, get_session_items() returns [] for a persistent session until
    get_session() has been called again for that id — confirm this is the
    intended UX for the "persistent" demo.
    """

    def __init__(self):
        # session_id -> SQLiteSession
        self.sessions = {}

    def get_session(self, session_id: str, db_file: str = "demo_sessions.db"):
        """Get or create a session"""
        # db_file is only honored the first time an id is seen; later calls
        # with a different db_file silently return the cached session.
        if session_id not in self.sessions:
            self.sessions[session_id] = SQLiteSession(session_id, db_file)
        return self.sessions[session_id]

    async def clear_session(self, session_id: str):
        """Clear a specific session's stored history and drop it from the cache."""
        if session_id in self.sessions:
            await self.sessions[session_id].clear_session()
            del self.sessions[session_id]

    async def get_session_items(self, session_id: str, limit: int = None):
        """Get conversation items from a session (empty list if id unknown)."""
        if session_id in self.sessions:
            return await self.sessions[session_id].get_items(limit=limit)
        return []

    async def add_custom_items(self, session_id: str, items: list):
        """Add custom items to a session (no-op if id unknown)."""
        if session_id in self.sessions:
            await self.sessions[session_id].add_items(items)

    async def pop_last_item(self, session_id: str):
        """Remove and return the last item from a session, or None."""
        if session_id in self.sessions:
            return await self.sessions[session_id].pop_item()
        return None
# Initialize session manager
# Stored in st.session_state so the registry survives Streamlit reruns
# within a browser session.
if 'session_manager' not in st.session_state:
    st.session_state.session_manager = SessionManager()
# Main UI
def main():
    """Top-level page: sidebar configuration plus the selected demo panel."""
    st.title("🔄 Session Management Demo")
    st.markdown("**Demonstrates OpenAI Agents SDK session capabilities**")

    main_agent, support_agent, sales_agent = initialize_agents()

    # Sidebar: demo selection and global session controls.
    with st.sidebar:
        st.header("⚙️ Session Configuration")
        demo_type = st.selectbox(
            "Select Demo Type",
            ["Basic Sessions", "Memory Operations", "Multi Sessions"],
        )
        if demo_type == "Basic Sessions":
            # Rendered for parity with the demo; the chosen value is not read
            # anywhere else (pre-existing behavior).
            session_type = st.radio("Session Type", ["In-Memory", "Persistent"])
        st.divider()
        st.subheader("Session Controls")
        if st.button("🗑️ Clear All Sessions"):
            with st.spinner("Clearing sessions..."):
                manager = st.session_state.session_manager
                for sid in list(manager.sessions.keys()):
                    asyncio.run(manager.clear_session(sid))
            st.success("All sessions cleared!")
            st.rerun()

    # Main content area — dispatch on the selected demo.
    panels = {
        "Basic Sessions": lambda: render_basic_sessions(main_agent),
        "Memory Operations": lambda: render_memory_operations(main_agent),
        "Multi Sessions": lambda: render_multi_sessions(support_agent, sales_agent),
    }
    panels[demo_type]()
def _render_session_column(agent, title, caption, session_id, db_file,
                           form_key, input_key, history_key):
    """Render one chat column (message form + history viewer) for *session_id*.

    The in-memory and persistent columns were previously duplicated code;
    they differ only in labels, session id, backing db file, and widget keys.
    """
    st.subheader(title)
    st.caption(caption)

    with st.form(form_key):
        user_input = st.text_input("Your message:", key=input_key)
        submitted = st.form_submit_button("Send Message")

    if submitted and user_input:
        with st.spinner("Processing..."):
            session = st.session_state.session_manager.get_session(session_id, db_file)
            result = asyncio.run(Runner.run(agent, user_input, session=session))
            st.success("Message sent!")
            st.write(f"**Assistant:** {result.final_output}")

    # Show conversation history
    if st.button("📋 Show Conversation", key=history_key):
        items = asyncio.run(st.session_state.session_manager.get_session_items(session_id))
        if items:
            st.write("**Conversation History:**")
            for i, item in enumerate(items, 1):
                role_emoji = "👤" if item['role'] == 'user' else "🤖"
                st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
        else:
            st.info("No conversation history yet.")


def render_basic_sessions(agent):
    """Render the basic sessions demo"""
    st.header("📝 Basic Sessions Demo")
    st.markdown("Demonstrates fundamental session memory with automatic conversation history.")

    col1, col2 = st.columns(2)
    with col1:
        _render_session_column(
            agent,
            title="💾 In-Memory Session",
            caption="Temporary session storage (lost when app restarts)",
            session_id="in_memory_demo",
            db_file="demo_sessions.db",  # SessionManager's default db file
            form_key="in_memory_form",
            input_key="in_memory_input",
            history_key="show_in_memory",
        )
    with col2:
        _render_session_column(
            agent,
            title="💽 Persistent Session",
            caption="File-based storage (survives app restarts)",
            session_id="persistent_demo",
            db_file="persistent_demo.db",
            form_key="persistent_form",
            input_key="persistent_input",
            history_key="show_persistent",
        )
def render_memory_operations(agent):
    """Render the memory operations demo.

    Left column inspects session items; right column mutates them
    (add custom items, pop the last item, clear the session).
    """
    st.header("🧠 Memory Operations Demo")
    st.markdown("Demonstrates advanced session memory operations including item manipulation and corrections.")
    session_id = "memory_operations_demo"

    # Main conversation area
    st.subheader("💬 Conversation")
    with st.form("memory_conversation"):
        user_input = st.text_input("Your message:")
        submitted = st.form_submit_button("Send Message")
    if submitted and user_input:
        with st.spinner("Processing..."):
            session = st.session_state.session_manager.get_session(session_id)
            result = asyncio.run(Runner.run(agent, user_input, session=session))
            st.success("Message sent!")
            st.write(f"**Assistant:** {result.final_output}")

    # Memory operations
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("📊 Memory Inspection")
        if st.button("🔍 Get All Items"):
            items = asyncio.run(st.session_state.session_manager.get_session_items(session_id))
            if items:
                st.write(f"**Total items:** {len(items)}")
                for i, item in enumerate(items, 1):
                    role_emoji = "👤" if item['role'] == 'user' else "🤖"
                    # Truncate long messages for the overview listing.
                    content_preview = item['content'][:100] + "..." if len(item['content']) > 100 else item['content']
                    st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {content_preview}")
            else:
                st.info("No items in session yet.")
        # Get limited items
        limit = st.number_input("Get last N items:", min_value=1, max_value=20, value=3)
        if st.button("📋 Get Recent Items"):
            items = asyncio.run(st.session_state.session_manager.get_session_items(session_id, limit=limit))
            if items:
                st.write(f"**Last {len(items)} items:**")
                for i, item in enumerate(items, 1):
                    role_emoji = "👤" if item['role'] == 'user' else "🤖"
                    st.write(f"{i}. {role_emoji} **{item['role'].title()}:** {item['content']}")
            else:
                st.info("No items to show.")
    with col2:
        st.subheader("✏️ Memory Manipulation")
        # Add custom items
        st.write("**Add Custom Items:**")
        with st.form("add_items_form"):
            user_content = st.text_area("User message to add:")
            assistant_content = st.text_area("Assistant response to add:")
            add_submitted = st.form_submit_button("➕ Add Items")
        if add_submitted and user_content and assistant_content:
            # Items are injected in user/assistant pairs to keep turns balanced.
            custom_items = [
                {"role": "user", "content": user_content},
                {"role": "assistant", "content": assistant_content}
            ]
            asyncio.run(st.session_state.session_manager.add_custom_items(session_id, custom_items))
            st.success("Custom items added!")
        # Pop last item (correction)
        if st.button("↶ Undo Last Response"):
            popped_item = asyncio.run(st.session_state.session_manager.pop_last_item(session_id))
            if popped_item:
                st.success(f"Removed: {popped_item['role']} - {popped_item['content'][:50]}...")
            else:
                st.warning("No items to remove.")
        # Clear session
        if st.button("🗑️ Clear Session"):
            asyncio.run(st.session_state.session_manager.clear_session(session_id))
            st.success("Session cleared!")
def render_multi_sessions(support_agent, sales_agent):
    """Render the multi-sessions demo.

    Three tabs: per-user sessions, per-context sessions, and a shared
    session that is handed off between the sales and support agents.
    """
    st.header("👥 Multi Sessions Demo")
    st.markdown("Demonstrates managing multiple conversations and different agent contexts.")
    tab1, tab2, tab3 = st.tabs(["👤 Multi-User", "🏢 Context-Based", "🔄 Agent Handoff"])
    with tab1:
        st.subheader("Different Users, Separate Sessions")
        col1, col2 = st.columns(2)
        with col1:
            st.write("**👩 Alice's Session**")
            alice_session_id = "user_alice"
            with st.form("alice_form"):
                alice_input = st.text_input("Alice's message:", key="alice_input")
                alice_submitted = st.form_submit_button("Send as Alice")
            if alice_submitted and alice_input:
                with st.spinner("Processing Alice's message..."):
                    session = st.session_state.session_manager.get_session(alice_session_id, "multi_user.db")
                    result = asyncio.run(Runner.run(support_agent, alice_input, session=session))
                    st.write(f"**Support:** {result.final_output}")
            if st.button("📋 Alice's History", key="alice_history"):
                items = asyncio.run(st.session_state.session_manager.get_session_items(alice_session_id))
                for item in items:
                    role_emoji = "👩" if item['role'] == 'user' else "🛠️"
                    st.write(f"{role_emoji} **{item['role'].title()}:** {item['content']}")
        with col2:
            st.write("**👨 Bob's Session**")
            bob_session_id = "user_bob"
            with st.form("bob_form"):
                bob_input = st.text_input("Bob's message:", key="bob_input")
                bob_submitted = st.form_submit_button("Send as Bob")
            if bob_submitted and bob_input:
                with st.spinner("Processing Bob's message..."):
                    # Same db file as Alice, but a separate session id keeps
                    # the histories isolated.
                    session = st.session_state.session_manager.get_session(bob_session_id, "multi_user.db")
                    result = asyncio.run(Runner.run(support_agent, bob_input, session=session))
                    st.write(f"**Support:** {result.final_output}")
            if st.button("📋 Bob's History", key="bob_history"):
                items = asyncio.run(st.session_state.session_manager.get_session_items(bob_session_id))
                for item in items:
                    role_emoji = "👨" if item['role'] == 'user' else "🛠️"
                    st.write(f"{role_emoji} **{item['role'].title()}:** {item['content']}")
    with tab2:
        st.subheader("Different Contexts, Different Sessions")
        col1, col2 = st.columns(2)
        with col1:
            st.write("**🛠️ Support Context**")
            support_session_id = "support_context"
            with st.form("support_context_form"):
                support_input = st.text_input("Support question:", key="support_context_input")
                support_submitted = st.form_submit_button("Ask Support")
            if support_submitted and support_input:
                with st.spinner("Processing support question..."):
                    session = st.session_state.session_manager.get_session(support_session_id, "contexts.db")
                    result = asyncio.run(Runner.run(support_agent, support_input, session=session))
                    st.write(f"**Support:** {result.final_output}")
        with col2:
            st.write("**💰 Sales Context**")
            sales_session_id = "sales_context"
            with st.form("sales_context_form"):
                sales_input = st.text_input("Sales inquiry:", key="sales_context_input")
                sales_submitted = st.form_submit_button("Ask Sales")
            if sales_submitted and sales_input:
                with st.spinner("Processing sales inquiry..."):
                    session = st.session_state.session_manager.get_session(sales_session_id, "contexts.db")
                    result = asyncio.run(Runner.run(sales_agent, sales_input, session=session))
                    st.write(f"**Sales:** {result.final_output}")
    with tab3:
        st.subheader("Shared Session Across Different Agents")
        st.caption("Customer handoff scenario - same conversation, different agents")
        shared_session_id = "customer_handoff"
        # Agent selector
        selected_agent = st.radio(
            "Select Agent:",
            ["Sales Agent", "Support Agent"],
            horizontal=True
        )
        agent = sales_agent if selected_agent == "Sales Agent" else support_agent
        with st.form("handoff_form"):
            handoff_input = st.text_input("Customer message:")
            handoff_submitted = st.form_submit_button(f"Send to {selected_agent}")
        if handoff_submitted and handoff_input:
            with st.spinner(f"Processing with {selected_agent}..."):
                # Both agents use the same session id/db, so context carries
                # across the handoff.
                session = st.session_state.session_manager.get_session(shared_session_id, "shared.db")
                result = asyncio.run(Runner.run(agent, handoff_input, session=session))
                st.write(f"**{selected_agent}:** {result.final_output}")
        # Show shared conversation history
        if st.button("📋 Show Shared Conversation"):
            items = asyncio.run(st.session_state.session_manager.get_session_items(shared_session_id))
            if items:
                st.write("**Shared Conversation History:**")
                for i, item in enumerate(items, 1):
                    if item['role'] == 'user':
                        st.write(f"{i}. 👤 **Customer:** {item['content']}")
                    else:
                        # Try to determine which agent responded based on content
                        # (heuristic: the session does not record which agent ran).
                        agent_emoji = "💰" if "sales" in item['content'].lower() or "price" in item['content'].lower() else "🛠️"
                        st.write(f"{i}. {agent_emoji} **Agent:** {item['content']}")
            else:
                st.info("No conversation history yet.")
# Footer
def render_footer():
    """Render the closing summary of the session capabilities demonstrated."""
    st.divider()
    st.markdown("""
    ### 🎯 Session Capabilities Demonstrated
    1. **Basic Sessions**: In-memory vs persistent storage
    2. **Memory Operations**: get_items(), add_items(), pop_item(), clear_session()
    3. **Multi Sessions**: Multiple users, contexts, and agent handoffs
    **Key Benefits:**
    - Automatic conversation history management
    - Flexible session organization strategies
    - Memory manipulation for corrections and custom flows
    - Multi-agent conversation support
    """)

# Streamlit re-executes the module on every interaction, so main() and the
# footer are rendered on each rerun.
if __name__ == "__main__":
    main()
    render_footer()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_3_multi_sessions/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_3_multi_sessions/__init__.py | # Multi Sessions module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_3_multi_sessions/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_3_multi_sessions/agent.py | from agents import Agent, Runner, SQLiteSession
# Create agents for multi-session demonstrations
# Two personas used across all examples below; both are plain chat agents
# with no tools — only their instructions differ.
support_agent = Agent(
    name="Support Agent",
    instructions="You are a customer support representative. Help with account and technical issues."
)
sales_agent = Agent(
    name="Sales Agent",
    instructions="You are a sales representative. Help with product information and purchases."
)
# Example 1: Different users with separate sessions
async def multi_user_sessions():
    """Demonstrates separate sessions for different users"""
    print("=== Multi-User Sessions ===")

    # One session per user, both backed by the same database file.
    alice_session = SQLiteSession("user_alice", "multi_user.db")
    bob_session = SQLiteSession("user_bob", "multi_user.db")

    async def turn(speaker, message, session):
        # Run the support agent, then echo both sides of the exchange.
        reply = await Runner.run(support_agent, message, session=session)
        print(f"{speaker}: {message}")
        print(f"Support: {reply.final_output}")

    print("Alice's conversation:")
    await turn("Alice", "I forgot my password", alice_session)
    await turn("Alice", "My email is alice@example.com", alice_session)

    print("\nBob's conversation:")
    await turn("Bob", "My app keeps crashing", bob_session)

    # Alice's session still holds her earlier context.
    print("\nAlice continues:")
    await turn("Alice", "Did you find my account?", alice_session)

    return alice_session, bob_session
# Example 2: Different conversation contexts
async def context_based_sessions():
    """Demonstrates sessions for different conversation contexts"""
    print("\n=== Context-Based Sessions ===")

    # One session per ticket/inquiry keeps the two contexts isolated.
    support_session = SQLiteSession("support_ticket_123", "contexts.db")
    sales_session = SQLiteSession("sales_inquiry_456", "contexts.db")

    async def exchange(agent, speaker, responder, message, session):
        reply = await Runner.run(agent, message, session=session)
        print(f"{speaker}: {message}")
        print(f"{responder}: {reply.final_output}")

    print("Support context:")
    await exchange(support_agent, "Customer", "Support", "I can't access my premium features", support_session)

    print("\nSales context:")
    await exchange(sales_agent, "Prospect", "Sales", "What premium features do you offer?", sales_session)

    # Returning to the support session — its context is unaffected by sales.
    print("\nBack to support:")
    await exchange(support_agent, "Customer", "Support", "I'm on the premium plan", support_session)

    return support_session, sales_session
# Example 3: Shared session across different agents
async def shared_session_agents():
    """Demonstrates how different agents can share the same session"""
    print("\n=== Shared Session Across Agents ===")

    # One session shared by both personas: the handoff keeps full context.
    shared_session = SQLiteSession("customer_handoff", "shared.db")

    async def handoff(agent, agent_label, message):
        reply = await Runner.run(agent, message, session=shared_session)
        print(f"Customer: {message}")
        print(f"{agent_label}: {reply.final_output}")

    print("Starting with Sales Agent:")
    await handoff(sales_agent, "Sales", "I'm interested in your premium plan but have technical questions.")

    print("\nHandoff to Support Agent:")
    await handoff(support_agent, "Support", "Can you help me understand the technical requirements?")

    print("\nBack to Sales Agent:")
    await handoff(sales_agent, "Sales", "Thanks for the technical info. How do I upgrade?")

    return shared_session
# Example 4: Session organization strategies
async def session_organization():
    """Demonstrates different session organization strategies"""
    print("\n=== Session Organization Strategies ===")

    import datetime

    # Strategy 1: user id + date stamp -> one session per user per day
    day_stamp = datetime.datetime.now().strftime("%Y%m%d")
    user_daily_session = SQLiteSession(f"user_123_{day_stamp}", "daily_sessions.db")

    # Strategy 2: one session per product feature
    chat_session = SQLiteSession("chat_feature_user_123", "feature_sessions.db")
    support_session = SQLiteSession("support_feature_user_123", "feature_sessions.db")

    # Strategy 3: one session per conversation thread
    thread_session = SQLiteSession("thread_abc123", "thread_sessions.db")

    # Demonstrate each approach with a single turn.
    demos = [
        ("Daily user session:", "Daily check-in", user_daily_session),
        ("\nFeature-specific chat:", "Chat feature question", chat_session),
        ("\nThread-based conversation:", "Thread conversation", thread_session),
    ]
    for header, message, session in demos:
        print(header)
        reply = await Runner.run(support_agent, message, session=session)
        print(f"Response: {reply.final_output}")

    return user_daily_session, chat_session, thread_session
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_1_basic_sessions/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_1_basic_sessions/__init__.py | # Basic Sessions module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_1_basic_sessions/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_1_basic_sessions/agent.py | from agents import Agent, Runner, SQLiteSession
# Create an agent for session demonstrations
root_agent = Agent(
name="Session Demo Assistant",
instructions="""
You are a helpful assistant that demonstrates session memory.
Remember previous conversation context and reference it when relevant.
Reply concisely but show that you remember previous interactions.
"""
)
# Example 1: In-memory session (temporary)
async def in_memory_session_example():
    """Demonstrates in-memory SQLite session that doesn't persist"""
    # No db path given, so the history lives only for this process.
    session = SQLiteSession("temp_conversation")
    print("=== In-Memory Session Example ===")

    first = await Runner.run(
        root_agent,
        "My name is Alice and I live in San Francisco.",
        session=session,
    )
    print(f"Turn 1: {first.final_output}")

    # The session carries turn 1 forward automatically.
    second = await Runner.run(root_agent, "What city do I live in?", session=session)
    print(f"Turn 2: {second.final_output}")

    return session
# Example 2: Persistent session (survives restarts)
async def persistent_session_example():
    """Demonstrates persistent SQLite session that saves to file"""
    # Supplying a db path makes the history survive process restarts.
    session = SQLiteSession("user_123", "conversation_history.db")
    print("\n=== Persistent Session Example ===")

    intro = await Runner.run(
        root_agent,
        "I'm a software developer working on AI projects.",
        session=session,
    )
    print(f"First message: {intro.final_output}")

    # Context from the first turn is preserved in the database.
    follow_up = await Runner.run(root_agent, "What kind of work do I do?", session=session)
    print(f"Follow-up: {follow_up.final_output}")

    return session
# Example 3: Multi-turn conversation (mimicking OpenAI SDK docs example)
async def multi_turn_conversation():
    """Demonstrates extended conversation with automatic memory like SDK docs"""
    session = SQLiteSession("conversation_123", "conversations.db")
    print("\n=== Multi-Turn Conversation (like SDK docs) ===")

    async def turn(header, question):
        # Print the banner, run the turn, then echo both sides.
        print(header)
        answer = await Runner.run(root_agent, question, session=session)
        print(f"User: {question}")
        print(f"Assistant: {answer.final_output}")

    await turn("🌉 First turn:", "What city is the Golden Gate Bridge in?")
    await turn("\n🏛️ Second turn (agent remembers automatically):", "What state is it in?")
    await turn("\n👥 Third turn (continuing context):", "What's the population of that state?")

    print("\n💡 Notice how the agent remembers context automatically!")
    print(" Sessions handle conversation history without manual .to_input_list()")

    return session
# Example 4: Session comparison - with vs without sessions
async def session_comparison():
    """Demonstrates the difference between using sessions vs no sessions"""
    print("\n=== Session vs No Session Comparison ===")

    # Without session (no memory): each run is independent.
    print("🚫 WITHOUT Sessions (no memory):")
    no_mem_first = await Runner.run(root_agent, "My name is Alice")
    print(f"Turn 1: {no_mem_first.final_output}")
    no_mem_second = await Runner.run(root_agent, "What's my name?")
    print(f"Turn 2: {no_mem_second.final_output}")
    print(" ↪️ Agent doesn't remember - no session used")

    # With session (automatic memory): history flows between runs.
    print("\n✅ WITH Sessions (automatic memory):")
    session = SQLiteSession("comparison_demo", "comparison.db")
    with_mem_first = await Runner.run(root_agent, "My name is Alice", session=session)
    print(f"Turn 1: {with_mem_first.final_output}")
    with_mem_second = await Runner.run(root_agent, "What's my name?", session=session)
    print(f"Turn 2: {with_mem_second.final_output}")
    print(" ↪️ Agent remembers - session automatically handles history!")

    return session
# Main execution function
async def main():
    """Run all basic session examples"""
    print("🧠 OpenAI Agents SDK - Basic Sessions Examples")
    print("=" * 60)
    # Run each demo in sequence; each returns its session object (unused here).
    await in_memory_session_example()
    await persistent_session_example()
    await multi_turn_conversation()
    await session_comparison()
    print("\n✅ Basic sessions examples completed!")
    print("Key concepts demonstrated:")
    print(" • In-memory sessions: SQLiteSession('session_id')")
    print(" • Persistent sessions: SQLiteSession('session_id', 'file.db')")
    print(" • Automatic memory: No manual .to_input_list() needed")
    print(" • Session vs no session: Memory comparison")


if __name__ == "__main__":
    # Local import keeps the dependency scoped to script execution.
    import asyncio
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_2_memory_operations/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_2_memory_operations/__init__.py | # Memory Operations module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_2_memory_operations/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/7_sessions/7_2_memory_operations/agent.py | from agents import Agent, Runner, SQLiteSession
# Create agent for memory operations demonstrations
root_agent = Agent(
name="Memory Operations Agent",
instructions="""
You are a helpful assistant demonstrating session memory operations.
Remember previous conversation context and reference it when relevant.
Reply concisely but show understanding of conversation history.
"""
)
# Example 1: Basic memory operations - get_items()
async def basic_memory_operations():
    """Demonstrates get_items, add_items, and session inspection from OpenAI SDK docs"""
    session = SQLiteSession("memory_demo", "operations.db")
    print("=== Basic Memory Operations ===")

    # Seed the session with a single turn.
    first_turn = await Runner.run(root_agent, "Hello, my favorite color is blue.", session=session)
    print(f"Agent Response: {first_turn.final_output}")

    # get_items() returns the stored conversation history.
    history = await session.get_items()
    print(f"\n📋 Session Memory Inspection (get_items()):")
    print(f" Total items in session: {len(history)}")
    for index, entry in enumerate(history, 1):
        text = entry['content']
        content_preview = text[:50] + "..." if len(text) > 50 else text
        print(f" {index}. [{entry['role']}]: {content_preview}")

    # add_items() lets us inject turns that never went through the model.
    print(f"\n➕ Adding Custom Items (add_items()):")
    manual_turns = [
        {"role": "user", "content": "I also love hiking and photography."},
        {"role": "assistant", "content": "Wonderful! Blue, hiking, and photography - I'll remember these interests."},
    ]
    await session.add_items(manual_turns)
    enriched_history = await session.get_items()
    print(f" Items after manual addition: {len(enriched_history)} (was {len(history)})")

    # The injected context is now visible to the agent.
    follow_up = await Runner.run(root_agent, "What hobbies do I have?", session=session)
    print(f"\n🤖 Agent with enriched context: {follow_up.final_output}")
    return session
# Example 2: Using pop_item() for corrections (from OpenAI SDK docs)
async def conversation_corrections():
    """Demonstrates using pop_item to correct or undo conversation turns"""
    session = SQLiteSession("correction_demo", "corrections.db")
    print("\n=== Conversation Corrections with pop_item() ===")

    # Ask the question we are about to retract.
    original = await Runner.run(root_agent, "What's 2 + 2?", session=session)
    print(f"❓ Original Question: What's 2 + 2?")
    print(f"🤖 Agent Answer: {original.final_output}")
    print(f"\n📊 Items before correction: {len(await session.get_items())}")

    # pop_item() removes the most recent stored item, so two pops undo one
    # full user/assistant exchange (assistant reply first, then the question).
    print(f"\n🔄 Correcting conversation using pop_item()...")
    removed_answer = await session.pop_item()
    if removed_answer:
        print(f" ↩️ Removed assistant response: {removed_answer['content'][:50]}...")
    removed_question = await session.pop_item()
    if removed_question:
        print(f" ↩️ Removed user question: {removed_question['content']}")
    print(f"📊 Items after corrections: {len(await session.get_items())}")

    # Re-ask with the corrected wording.
    corrected = await Runner.run(root_agent, "What's 2 + 3?", session=session)
    print(f"\n✅ Corrected Question: What's 2 + 3?")
    print(f"🤖 New Answer: {corrected.final_output}")
    return session
# Example 3: clear_session() for session reset (from OpenAI SDK docs)
async def session_management():
    """Demonstrates clear_session() and session lifecycle management"""
    session = SQLiteSession("management_demo", "management.db")
    print("\n=== Session Management with clear_session() ===")

    # Populate the session with a few facts about the user.
    print("🏗️ Building conversation history...")
    for statement in (
        "I work as a teacher.",
        "I teach mathematics.",
        "I love solving puzzles.",
    ):
        await Runner.run(root_agent, statement, session=session)

    before_clear = await session.get_items()
    print(f"📊 Session contains {len(before_clear)} items before clearing")

    # Confirm the agent can use the stored context.
    recall = await Runner.run(root_agent, "What do I do for work?", session=session)
    print(f"🤖 Agent remembers: {recall.final_output}")

    # clear_session() wipes every stored item in this session.
    print(f"\n🧹 Clearing session with clear_session()...")
    await session.clear_session()
    after_clear = await session.get_items()
    print(f"📊 Session contains {len(after_clear)} items after clearing")

    # The agent now starts from a blank history.
    fresh = await Runner.run(root_agent, "Do you know anything about me?", session=session)
    print(f"🤖 Fresh conversation (no memory): {fresh.final_output}")
    return session
# Example 4: Advanced memory inspection with get_items(limit)
async def memory_inspection():
    """Demonstrates get_items with limit parameter and detailed memory analysis"""
    session = SQLiteSession("inspection_demo", "inspection.db")
    print("\n=== Advanced Memory Inspection ===")

    # A longer exchange gives get_items(limit=...) something to slice.
    prompts = [
        "Hello, I'm learning about AI.",
        "What is machine learning?",
        "How does deep learning work?",
        "What's the difference between AI and ML?",
        "Can you explain neural networks?",
    ]
    print("🏗️ Building extended conversation...")
    for user_prompt in prompts:
        await Runner.run(root_agent, user_prompt, session=session)

    # limit=N returns only the N most recent items.
    print(f"\n🔍 Memory Inspection with get_items(limit=3):")
    recent_items = await session.get_items(limit=3)
    print(f" Last 3 items (out of full conversation):")
    for position, entry in enumerate(recent_items, 1):
        text = entry['content']
        content_preview = text[:60] + "..." if len(text) > 60 else text
        print(f" {position}. [{entry['role']}]: {content_preview}")

    # Compare the slice against the complete history.
    all_items = await session.get_items()
    print(f"\n📊 Full conversation analysis:")
    print(f" Total items in session: {len(all_items)}")
    print(f" Recent items retrieved: {len(recent_items)}")

    # Tally items by speaker.
    user_items = [entry for entry in all_items if entry['role'] == 'user']
    assistant_items = [entry for entry in all_items if entry['role'] == 'assistant']
    print(f" User messages: {len(user_items)}")
    print(f" Assistant responses: {len(assistant_items)}")
    return session
# Main execution function
async def main():
    """Run all memory operations examples.

    Awaits each demo coroutine in sequence; each demo prints its own output
    and returns its session object (discarded here).
    """
    # FIX: removed a dead `import asyncio` that sat inside this coroutine —
    # asyncio is never used in main(); the __main__ guard below imports it
    # itself before calling asyncio.run().
    print("🧠 OpenAI Agents SDK - Memory Operations Examples")
    print("=" * 60)
    await basic_memory_operations()
    await conversation_corrections()
    await session_management()
    await memory_inspection()
    print("\n✅ All memory operations examples completed!")
    print("Key operations demonstrated:")
    print(" • get_items() - Retrieve conversation history")
    print(" • add_items() - Manually add conversation items")
    print(" • pop_item() - Remove last item for corrections")
    print(" • clear_session() - Reset conversation history")
    print(" • get_items(limit=N) - Retrieve recent items only")


if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/app.py | ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/app.py | """
Streamlit Web Interface for Tutorial 1: Your First Agent
This provides an interactive web interface to test the personal assistant agent
with different execution methods.
"""
import os
import asyncio
import streamlit as st
from dotenv import load_dotenv
from agents import Agent, Runner
# Load environment variables
load_dotenv()
# Page configuration
st.set_page_config(
    page_title="Personal Assistant Agent",
    page_icon="🎯",
    layout="wide"
)
# Title and description
st.title("🎯 Personal Assistant Agent")
st.markdown("**Tutorial 1**: Your first OpenAI agent with different execution methods")
# Check API key
# Fail fast with a visible error when the key is missing; st.stop() halts the
# script so no agent code runs without credentials.
if not os.getenv("OPENAI_API_KEY"):
    st.error("❌ OPENAI_API_KEY not found. Please create a .env file with your OpenAI API key.")
    st.stop()
# Create the agent
@st.cache_resource
def create_agent():
    """Build the personal-assistant Agent once.

    st.cache_resource memoizes the returned Agent so Streamlit's
    rerun-on-interaction model does not rebuild it on every widget event.
    """
    return Agent(
        name="Personal Assistant",
        instructions="""
        You are a helpful personal assistant.
        Your role is to:
        1. Answer questions clearly and concisely
        2. Provide helpful information and advice
        3. Be friendly and professional
        4. Offer practical solutions to problems
        When users ask questions:
        - Give accurate and helpful responses
        - Explain complex topics in simple terms
        - Offer follow-up suggestions when appropriate
        - Maintain a positive and supportive tone
        Keep responses concise but informative.
        """
    )


# Shared agent instance used by every execution path below.
agent = create_agent()
# Sidebar with execution method selection
st.sidebar.title("Execution Methods")
execution_method = st.sidebar.selectbox(
    "Choose execution method:",
    ["Synchronous", "Asynchronous", "Streaming"]
)
st.sidebar.markdown("---")
st.sidebar.markdown("### About Execution Methods")
# Show a short explainer matching the selected method.
if execution_method == "Synchronous":
    st.sidebar.info("**Synchronous**: Blocks until response is complete. Simple and straightforward.")
elif execution_method == "Asynchronous":
    st.sidebar.info("**Asynchronous**: Non-blocking execution. Good for concurrent operations.")
else:
    st.sidebar.info("**Streaming**: Real-time response streaming. Great for long responses.")


# Main chat interface
st.markdown("### Chat Interface")
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages
# Streamlit re-executes the whole script on every interaction, so the stored
# transcript is replayed here on each rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat input
# Handle a new user message: echo it, run the agent with the selected
# execution method, and append the reply (or error) to the transcript.
if prompt := st.chat_input("Ask your personal assistant anything..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate assistant response
    with st.chat_message("assistant"):
        try:
            if execution_method == "Synchronous":
                with st.spinner("Thinking..."):
                    result = Runner.run_sync(agent, prompt)
                    response = result.final_output
                    st.markdown(response)
            elif execution_method == "Asynchronous":
                with st.spinner("Processing asynchronously..."):
                    async def get_async_response():
                        result = await Runner.run(agent, prompt)
                        return result.final_output
                    response = asyncio.run(get_async_response())
                    st.markdown(response)
            else:  # Streaming
                response_placeholder = st.empty()

                async def stream_response():
                    # FIX: Runner.run_streamed() returns a RunResultStreaming
                    # object synchronously; the async iterator is its
                    # .stream_events() method. The old code iterated the result
                    # object itself and looked for a non-existent
                    # `event.content` attribute, so streaming never worked.
                    full_response = ""
                    result = Runner.run_streamed(agent, prompt)
                    async for event in result.stream_events():
                        # Raw response events carry incremental text deltas.
                        if event.type == "raw_response_event" and hasattr(event.data, "delta"):
                            full_response += event.data.delta
                            response_placeholder.markdown(full_response + "▌")
                    response_placeholder.markdown(full_response)
                    return full_response

                response = asyncio.run(stream_response())

            # Add assistant response to chat history
            st.session_state.messages.append({"role": "assistant", "content": response})
        except Exception as e:
            error_msg = f"❌ Error: {str(e)}"
            st.error(error_msg)
            st.session_state.messages.append({"role": "assistant", "content": error_msg})
# Clear chat button
if st.sidebar.button("Clear Chat History"):
    # Drop the whole transcript and redraw the page.
    st.session_state.messages = []
    st.rerun()


# Example prompts
st.sidebar.markdown("---")
st.sidebar.markdown("### Example Prompts")
example_prompts = [
    "What are 3 productivity tips for remote work?",
    "Explain quantum computing in simple terms",
    "Write a short poem about technology",
    "How can I improve my focus and concentration?",
    "What's the difference between AI and machine learning?"
]
for prompt in example_prompts:
    # key= keeps each button's widget identity unique across reruns.
    if st.sidebar.button(prompt, key=f"example_{prompt[:20]}"):
        # Add the example prompt to chat
        # NOTE(review): this only appends the user message and reruns; no
        # assistant response is generated for example prompts — confirm intended.
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.rerun()
# Footer with tutorial information
st.markdown("---")
st.markdown("""
### 📚 Tutorial Information
This is **Tutorial 1** of the OpenAI Agents SDK crash course. You're learning:
- ✅ Basic agent creation with the Agent class
- ✅ Different execution methods (sync, async, streaming)
- ✅ Agent configuration with instructions
- ✅ Interactive web interfaces with Streamlit
**Next**: Try [Tutorial 2: Structured Output Agent](../2_structured_output_agent/) to learn about type-safe responses.
""")
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/__init__.py | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false | |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/agent.py | from agents import Agent, Runner
import asyncio
# Create an agent for demonstrating different execution methods
root_agent = Agent(
name="Personal Assistant Agent",
instructions="""
You are a helpful personal assistant.
Your role is to:
1. Answer questions clearly and concisely
2. Provide helpful information and advice
3. Be friendly and professional
4. Offer practical solutions to problems
When users ask questions:
- Give accurate and helpful responses
- Explain complex topics in simple terms
- Offer follow-up suggestions when appropriate
- Maintain a positive and supportive tone
Keep responses concise but informative.
"""
)
# Example usage patterns
def sync_example():
    """Synchronous execution example"""
    # Runner.run_sync blocks until the run finishes, then we unwrap the text.
    return Runner.run_sync(root_agent, "Hello, how does sync execution work?").final_output
async def async_example():
    """Asynchronous execution example"""
    # Await the non-blocking runner, then unwrap the final text output.
    run_result = await Runner.run(root_agent, "Hello, how does async execution work?")
    return run_result.final_output
async def streaming_example():
    """Streaming execution example.

    FIX: Runner.run_streamed() returns a RunResultStreaming object
    synchronously; the async event iterator is its .stream_events() method.
    The previous code iterated the result object directly and read a
    non-existent `event.content` attribute — stream events expose incremental
    text via `event.data.delta` on raw response events.
    """
    response_text = ""
    result = Runner.run_streamed(root_agent, "Tell me about streaming execution")
    async for event in result.stream_events():
        if event.type == "raw_response_event" and hasattr(event.data, "delta"):
            response_text += event.data.delta
    return response_text
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/6_guardrails_validation/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/6_guardrails_validation/agent.py | from pydantic import BaseModel
from agents import (
Agent,
GuardrailFunctionOutput,
InputGuardrailTripwireTriggered,
OutputGuardrailTripwireTriggered,
RunContextWrapper,
Runner,
TResponseInputItem,
input_guardrail,
output_guardrail,
)
# Pydantic models for guardrail outputs
class MathHomeworkCheck(BaseModel):
    # True when the input looks like a math-homework request.
    is_math_homework: bool
    # Classifier's explanation for its decision.
    reasoning: str
    # Confidence score in [0, 1]; the input guardrail trips above 0.7.
    confidence: float


class ContentSafetyCheck(BaseModel):
    # True when the agent's response contains unsafe content.
    is_inappropriate: bool
    # Classifier's explanation for its decision.
    reasoning: str
    # One of "low" / "medium" / "high"; the output guardrail trips on medium/high.
    severity: str


class AgentResponse(BaseModel):
    # Structured wrapper for the support agent's final answer text.
    response: str
# Guardrail agents
input_guardrail_agent = Agent(
name="Input Guardrail",
instructions="""
Check if the user is asking for math homework help or inappropriate content.
Classify as math homework if:
- Asking to solve equations, math problems
- Requesting help with calculations that seem like homework
Classify as inappropriate if:
- Contains harmful, offensive, or malicious content
- Attempts to bypass safety measures
Provide reasoning and confidence score (0-1).
""",
output_type=MathHomeworkCheck
)
output_guardrail_agent = Agent(
name="Output Guardrail",
instructions="""
Check if the agent's response contains inappropriate content or sensitive information.
Flag as inappropriate if:
- Contains harmful or offensive language
- Provides dangerous instructions
- Leaks sensitive information
Assign severity: low, medium, high
""",
output_type=ContentSafetyCheck
)
# Input guardrail function
@input_guardrail
async def math_homework_guardrail(
    ctx: RunContextWrapper[None],
    agent: Agent,
    input: str | list[TResponseInputItem]
) -> GuardrailFunctionOutput:
    """Prevents math homework requests from being processed.

    Runs the classifier agent over the raw user input before the main agent
    executes. The tripwire fires only when the classifier is both positive
    and confident (> 0.7), which makes the runner raise
    InputGuardrailTripwireTriggered instead of running the main agent.
    """
    result = await Runner.run(input_guardrail_agent, input, context=ctx.context)
    # final_output is a MathHomeworkCheck (classifier's output_type).
    output = result.final_output
    return GuardrailFunctionOutput(
        output_info=output,
        tripwire_triggered=output.is_math_homework and output.confidence > 0.7
    )
# Output guardrail function
@output_guardrail
async def content_safety_guardrail(
    ctx: RunContextWrapper[None],
    agent: Agent,
    output: AgentResponse
) -> GuardrailFunctionOutput:
    """Ensures agent responses are safe and appropriate.

    Runs the safety-classifier agent over the generated response text. The
    tripwire fires only for content flagged inappropriate at "medium" or
    "high" severity, raising OutputGuardrailTripwireTriggered before the
    reply reaches the caller.
    """
    result = await Runner.run(output_guardrail_agent, output.response, context=ctx.context)
    # final_output is a ContentSafetyCheck (classifier's output_type).
    safety_check = result.final_output
    return GuardrailFunctionOutput(
        output_info=safety_check,
        tripwire_triggered=safety_check.is_inappropriate and safety_check.severity in ["medium", "high"]
    )
# Main agent with guardrails
root_agent = Agent(
name="Protected Customer Support Agent",
instructions="""
You are a helpful customer support agent.
You help customers with:
- Product questions and information
- Account issues and support
- General inquiries and guidance
You DO NOT help with:
- Academic homework (especially math)
- Inappropriate or harmful requests
- Sensitive or confidential information
Be helpful but maintain appropriate boundaries.
""",
input_guardrails=[math_homework_guardrail],
output_guardrails=[content_safety_guardrail],
output_type=AgentResponse
)
# Example usage with guardrails
async def guardrails_example():
    """Demonstrates guardrails with various inputs"""
    # One benign request, one homework request (should trip the input
    # guardrail), and one more benign request.
    test_cases = [
        "How do I reset my password?",  # Should pass
        "Can you solve this equation: 2x + 5 = 15?",  # Should trigger input guardrail
        "What are your product features?",  # Should pass
    ]
    for case_number, test_input in enumerate(test_cases, 1):
        print(f"\n--- Test Case {case_number}: {test_input} ---")
        try:
            run_result = await Runner.run(root_agent, test_input)
        except InputGuardrailTripwireTriggered as e:
            print(f"🚫 Input Guardrail Triggered: {e}")
        except OutputGuardrailTripwireTriggered as e:
            print(f"⚠️ Output Guardrail Triggered: {e}")
        except Exception as e:
            print(f"❌ Error: {e}")
        else:
            print(f"✅ Success: {run_result.final_output.response}")
# Standalone example functions
async def test_input_guardrail():
    """Test input guardrail specifically"""
    try:
        # A clear homework request should trip the math-homework tripwire.
        await Runner.run(root_agent, "Can you help me solve this calculus problem?")
        print("❌ Guardrail should have triggered")
    except InputGuardrailTripwireTriggered:
        print("✅ Input guardrail correctly triggered for math homework")


async def test_valid_request():
    """Test valid customer support request"""
    # An ordinary support question should pass both guardrails.
    result = await Runner.run(root_agent, "I'm having trouble logging into my account. Can you help?")
    print(f"✅ Valid request processed: {result.final_output.response}")
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/2_model_agnostic_agent/2_1_openai_adk_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/2_model_agnostic_agent/2_1_openai_adk_agent/__init__.py | from . import agent
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/2_model_agnostic_agent/2_1_openai_adk_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/2_model_agnostic_agent/2_1_openai_adk_agent/agent.py | import os
import random
from google.adk.agents import Agent
from google.adk.models.lite_llm import LiteLlm
def get_fun_fact():
    """Return a random fun fact"""
    # Fixed pool of facts; one is drawn uniformly at random per call.
    fun_facts = (
        "Honey never spoils. Archaeologists have found pots of honey in ancient Egyptian tombs that are over 3,000 years old and still perfectly edible.",
        "Octopuses have three hearts and blue blood.",
        "A group of flamingos is called a 'flamboyance'.",
        "Bananas are berries, but strawberries aren't.",
        "A day on Venus is longer than its year.",
        "Wombat poop is cube-shaped.",
        "There are more possible games of chess than atoms in the observable universe.",
        "Dolphins have names for each other.",
    )
    return random.choice(fun_facts)
# OpenAI model via OpenRouter
model = LiteLlm(
model="openrouter/openai/gpt-4o",
api_key=os.getenv("OPENROUTER_API_KEY")
)
root_agent = Agent(
name="openai_adk_agent",
model=model,
description="Fun fact agent using OpenAI GPT-4 via OpenRouter",
instruction="""
You are a helpful assistant powered by OpenAI GPT-4 that shares interesting fun facts.
Use the `get_fun_fact` tool when users ask for a fun fact or interesting information.
Be enthusiastic and friendly in your responses.
Always mention that you're powered by OpenAI GPT-4 when introducing yourself.
""",
tools=[get_fun_fact],
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/2_model_agnostic_agent/2_2_anthropic_adk_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/2_model_agnostic_agent/2_2_anthropic_adk_agent/__init__.py | from . import agent
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/2_model_agnostic_agent/2_2_anthropic_adk_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/2_model_agnostic_agent/2_2_anthropic_adk_agent/agent.py | import os
import random
from google.adk.agents import Agent
from google.adk.models.lite_llm import LiteLlm
def get_fun_fact():
    """Return a random fun fact"""
    # Static list of trivia; each call picks one entry uniformly at random.
    available_facts = [
        "Honey never spoils. Archaeologists have found pots of honey in ancient Egyptian tombs that are over 3,000 years old and still perfectly edible.",
        "Octopuses have three hearts and blue blood.",
        "A group of flamingos is called a 'flamboyance'.",
        "Bananas are berries, but strawberries aren't.",
        "A day on Venus is longer than its year.",
        "Wombat poop is cube-shaped.",
        "There are more possible games of chess than atoms in the observable universe.",
        "Dolphins have names for each other.",
    ]
    return random.choice(available_facts)
# Anthropic model via OpenRouter
model = LiteLlm(
model="openrouter/anthropic/claude-sonnet-4-20250514",
api_key=os.getenv("OPENROUTER_API_KEY")
)
root_agent = Agent(
name="anthropic_adk_agent",
model=model,
description="Fun fact agent using Anthropic Claude 4 Sonnet via OpenRouter",
instruction="""
You are a helpful assistant powered by Anthropic Claude 4 Sonnet that shares interesting fun facts.
Use the `get_fun_fact` tool when users ask for a fun fact or interesting information.
Be enthusiastic and friendly in your responses.
Always mention that you're powered by Anthropic Claude when introducing yourself.
""",
tools=[get_fun_fact],
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/8_simple_multi_agent/multi_agent_researcher/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/8_simple_multi_agent/multi_agent_researcher/__init__.py | from .agent import root_agent
__all__ = ['root_agent'] | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/8_simple_multi_agent/multi_agent_researcher/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/8_simple_multi_agent/multi_agent_researcher/agent.py | from google.adk.agents import LlmAgent
from google.adk.tools.agent_tool import AgentTool
from google.adk.tools import google_search
# --- Sub-agents ---
# Web-search-capable specialist. It is the only sub-agent with a tool; the
# coordinator wraps the whole agent as an AgentTool rather than exposing
# google_search directly (see root_agent below).
research_agent = LlmAgent(
    name="research_agent",
    model="gemini-3-flash-preview",
    description="Finds key information and outlines for a given topic.",
    instruction=(
        "You are a focused research specialist. Given a user topic or goal, "
        "conduct thorough research and produce:\n"
        "1. A comprehensive bullet list of key facts and findings\n"
        "2. Relevant sources and references (when available)\n"
        "3. A structured outline for approaching the topic\n"
        "4. Current trends or recent developments\n\n"
        "Keep your research factual, well-organized, and comprehensive. "
        "Use the google_search tool to find current information when needed."
    ),
    tools=[google_search]
)

# Tool-less synthesis specialist; registered as a sub_agent of the coordinator.
summarizer_agent = LlmAgent(
    name="summarizer_agent",
    model="gemini-3-flash-preview",
    description="Summarizes research findings clearly and concisely.",
    instruction=(
        "You are a skilled summarizer. Given research findings, create:\n"
        "1. A concise executive summary (2-3 sentences)\n"
        "2. 5-7 key bullet points highlighting the most important information\n"
        "3. A clear takeaway message\n"
        "4. Any critical insights or patterns you notice\n\n"
        "Focus on clarity, relevance, and actionable insights. "
        "Avoid repetition and maintain the logical flow of information."
    ),
)

# Tool-less review specialist; registered as a sub_agent of the coordinator.
critic_agent = LlmAgent(
    name="critic_agent",
    model="gemini-3-flash-preview",
    description="Provides constructive critique and improvement suggestions.",
    instruction=(
        "You are a thoughtful analyst and critic. Given research and summaries, provide:\n"
        "1. **Gap Analysis**: Identify missing information or areas that need more research\n"
        "2. **Risk Assessment**: Highlight potential risks, limitations, or biases\n"
        "3. **Opportunity Identification**: Suggest areas for further exploration or improvement\n"
        "4. **Quality Score**: Rate the overall research quality (1-10) with justification\n"
        "5. **Actionable Recommendations**: Provide specific next steps or improvements\n\n"
        "Be constructive, thorough, and evidence-based in your analysis."
    ),
)
# --- Coordinator (root) agent ---
root_agent = LlmAgent(
name="multi_agent_researcher",
model="gemini-3-flash-preview",
description="Advanced multi-agent research coordinator that orchestrates research, analysis, and critique.",
instruction=(
"You are an advanced research coordinator managing a team of specialized agents.\n\n"
"**Your Research Team:**\n"
"- **research_agent**: Conducts comprehensive research using web search and analysis\n"
"- **summarizer_agent**: Synthesizes findings into clear, actionable insights\n"
"- **critic_agent**: Provides quality analysis, gap identification, and recommendations\n\n"
"**Research Workflow:**\n"
"1. **Research Phase**: Delegate to research_agent to gather comprehensive information\n"
"2. **Synthesis Phase**: Use summarizer_agent to distill findings into key insights\n"
"3. **Analysis Phase**: Engage critic_agent to evaluate quality and identify opportunities\n"
"4. **Integration**: Combine all outputs into a cohesive research report\n\n"
"**For Each Research Request:**\n"
"- Always start with research_agent to gather information\n"
"- Then use summarizer_agent to create clear summaries\n"
"- Finally, engage critic_agent for quality analysis and recommendations\n"
"- Present the final integrated research report to the user\n\n"
"**Output Format:**\n"
"Provide a structured response that includes:\n"
"- Executive Summary\n"
"- Key Findings\n"
"- Critical Analysis\n"
"- Recommendations\n"
"- Next Steps\n\n"
"Coordinate your team effectively to deliver high-quality, comprehensive research."
),
sub_agents=[summarizer_agent, critic_agent],
tools=[AgentTool(research_agent)]
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_3_thirdparty_tools/langchain_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_3_thirdparty_tools/langchain_agent/__init__.py | from . import agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_3_thirdparty_tools/langchain_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_3_thirdparty_tools/langchain_agent/agent.py | from google.adk.agents import LlmAgent
from google.adk.tools.langchain_tool import LangchainTool
from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
# Create LangChain tools
# LangchainTool adapts a LangChain tool to the ADK tool interface.
search_tool = LangchainTool(DuckDuckGoSearchRun())
# Wikipedia tool needs its API wrapper supplied explicitly.
wiki_tool = LangchainTool(WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()))
# Create an agent with LangChain tools
root_agent = LlmAgent(
name="langchain_agent",
model="gemini-3-flash-preview",
description="A research agent that uses LangChain tools for web search and Wikipedia queries",
instruction="""
You are a research assistant with access to powerful LangChain tools.
Your capabilities include:
**Web Search (DuckDuckGo):**
- Search the web for current information
- Find recent news and developments
- Discover websites and resources
- Get real-time information
**Wikipedia Search:**
- Search Wikipedia for encyclopedic information
- Get detailed articles on topics
- Access historical and factual information
- Find comprehensive background information
**Available Tools:**
- `DuckDuckGoSearchRun`: Web search using DuckDuckGo
- `WikipediaQueryRun`: Wikipedia article search and retrieval
**Guidelines:**
1. For recent news or current events, use DuckDuckGo search
2. For factual, encyclopedic information, use Wikipedia
3. Combine results from both sources when helpful
4. Always cite your sources
5. Be clear about which tool you're using
**Example workflows:**
- "What's the latest news about AI?" → Use DuckDuckGo search
- "Tell me about the history of Rome" → Use Wikipedia search
- "Current stock market trends" → Use DuckDuckGo search
- "Information about photosynthesis" → Use Wikipedia search
- "Recent developments in renewable energy" → Use DuckDuckGo search
You can also use both tools for comprehensive research:
- Wikipedia for background information
- DuckDuckGo for current developments
Always provide helpful, accurate information and explain your research process.
""",
tools=[search_tool, wiki_tool]
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_3_thirdparty_tools/crewai_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_3_thirdparty_tools/crewai_agent/__init__.py | from . import agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_3_thirdparty_tools/crewai_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_3_thirdparty_tools/crewai_agent/agent.py | from google.adk.agents import LlmAgent
from google.adk.tools.crewai_tool import CrewaiTool
from crewai_tools import (
ScrapeWebsiteTool,
DirectorySearchTool,
FileReadTool
)
def _gemini_rag_config() -> dict:
    """Build the shared CrewAI RAG config (Gemini LLM + Gemini embedder).

    The three CrewAI tools below previously duplicated this dict verbatim.
    A fresh dict is returned per call so each tool owns its own config
    object and no state is accidentally shared between tool instances.
    """
    return dict(
        llm=dict(
            provider="google",
            config=dict(model="gemini-3-flash-preview"),
        ),
        embedder=dict(
            provider="google",
            config=dict(
                model="gemini-embedding-001",
                task_type="retrieval_document",
            ),
        ),
    )


# Wrap CrewAI tools as ADK-compatible tools. The name/description pairs are
# what the LLM sees when deciding which tool to invoke.
scrape_website_tool = CrewaiTool(
    name="scrape_website",
    description="Scrape and extract content from websites",
    tool=ScrapeWebsiteTool(config=_gemini_rag_config())
)

directory_search_tool = CrewaiTool(
    name="directory_search",
    description="Search for files and directories in the local filesystem",
    tool=DirectorySearchTool(config=_gemini_rag_config())
)

file_read_tool = CrewaiTool(
    name="file_read",
    description="Read and analyze content from files",
    tool=FileReadTool(config=_gemini_rag_config())
)
# Create an agent with CrewAI tools.
# NOTE: the three tools referenced in `tools=` are the module-level
# CrewaiTool wrappers defined above in this file.
root_agent = LlmAgent(
    name="crewai_agent",
    model="gemini-3-flash-preview",
    description="A versatile agent that uses CrewAI tools for web scraping, file operations, and content analysis",
    # The instruction string is the system prompt; it documents each tool so
    # the model can route user requests to the right one.
    instruction="""
    You are a versatile assistant with access to powerful CrewAI tools for web scraping,
    file operations, and content analysis.
    Your capabilities include:
    **Web Operations:**
    - Website content search and analysis
    - Web scraping and data extraction
    - Content retrieval from specific URLs
    - Website structure analysis
    **File Operations:**
    - Directory and file system search
    - File reading and content analysis
    - Local file processing
    - Document analysis
    **Available Tools:**
    - `ScrapeWebsiteTool`: Extract and scrape content from web pages
    - `DirectorySearchTool`: Search local directories and file systems
    - `FileReadTool`: Read and analyze local files
    **Guidelines:**
    1. For web content analysis, use ScrapeWebsiteTool
    2. For file operations, use DirectorySearchTool and FileReadTool
    3. Always explain what tool you're using and why
    4. Provide clear summaries of extracted content
    5. Handle errors gracefully and suggest alternatives
    6. Respect website terms of service and robots.txt
    **Example workflows:**
    - "Search for pricing information on company.com" → Use ScrapeWebsiteTool
    - "Extract all headings from this webpage" → Use ScrapeWebsiteTool
    - "Find all Python files in this directory" → Use DirectorySearchTool
    - "Read and summarize this document" → Use FileReadTool
    - "Analyze the structure of this website" → Use ScrapeWebsiteTool
    **Use Cases:**
    - Content research and analysis
    - Web scraping for data extraction
    - File system exploration
    - Document processing and analysis
    - Website structure analysis
    Always provide helpful, accurate information and explain your process clearly.
    Be respectful of website policies and handle sensitive information appropriately.
    """,
    tools=[
        scrape_website_tool,
        directory_search_tool,
        file_read_tool
    ]
)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_4_mcp_tools/firecrawl_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_4_mcp_tools/firecrawl_agent/__init__.py | from . import agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_4_mcp_tools/firecrawl_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_4_mcp_tools/firecrawl_agent/agent.py | """
Firecrawl Agent - Advanced Web Scraping with MCP Tools Integration
This example demonstrates how to connect an ADK agent to a Firecrawl MCP server
using the MCPToolset. The agent can perform advanced web scraping operations like
single page scraping, batch scraping, web crawling, content extraction, and deep research.
"""
import os
from google.adk.agents import LlmAgent
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset, StdioServerParameters
# Create the ADK agent with Firecrawl MCP tools
root_agent = LlmAgent(
model='gemini-3-flash-preview',
name='firecrawl_mcp_agent',
instruction="""
You are an advanced web scraping and research assistant powered by Firecrawl.
You have access to comprehensive web scraping tools through the Model Context Protocol (MCP):
🔧 **Available Tools:**
- **firecrawl_scrape**: Extract content from a single URL with advanced options
- **firecrawl_batch_scrape**: Efficiently scrape multiple URLs with parallel processing
- **firecrawl_map**: Discover all URLs on a website for exploration
- **firecrawl_search**: Search the web and extract content from results
- **firecrawl_crawl**: Perform comprehensive website crawling with depth control
- **firecrawl_extract**: Extract structured data using AI-powered analysis
- **firecrawl_deep_research**: Conduct in-depth research with multi-source analysis
- **firecrawl_generate_llmstxt**: Generate LLMs.txt files for domains
- **firecrawl_check_crawl_status**: Monitor crawl job progress
- **firecrawl_check_batch_status**: Monitor batch operation progress
🎯 **Tool Selection Guide:**
- **Single URL**: Use `firecrawl_scrape`
- **Multiple known URLs**: Use `firecrawl_batch_scrape`
- **Discover URLs**: Use `firecrawl_map`
- **Web search**: Use `firecrawl_search`
- **Structured data**: Use `firecrawl_extract`
- **Deep research**: Use `firecrawl_deep_research`
- **Full site analysis**: Use `firecrawl_crawl` (with caution on limits)
🌟 **Key Features:**
- Automatic rate limiting and retry logic
- Parallel processing for batch operations
- LLM-powered content extraction
- Support for multiple output formats (Markdown, HTML, JSON)
- Advanced filtering and content selection
- Mobile and desktop rendering options
💡 **Best Practices:**
- Always explain which tool you're using and why
- For large operations, inform users about potential wait times
- Use batch operations for multiple URLs instead of individual scrapes
- Leverage structured extraction for specific data needs
- Respect rate limits and be considerate of target websites
🚨 **Important Notes:**
- Crawl operations can be resource-intensive; use appropriate limits
- Batch operations are queued and may take time to complete
- Always check the status of long-running operations
- Some tools require a valid Firecrawl API key
Be helpful, efficient, and always explain your approach to web scraping tasks.
""",
tools=[
MCPToolset(
connection_params=StdioServerParameters(
command='npx',
args=[
"-y", # Auto-confirm npm package installation
"firecrawl-mcp", # The Firecrawl MCP server package
],
env={
# Note: Users need to set FIRECRAWL_API_KEY in their environment
# or add it to their system environment variables
"FIRECRAWL_API_KEY": os.getenv("FIRECRAWL_API_KEY", "")
}
),
# Optional: Filter which tools from the MCP server to expose
# Uncomment the line below to limit to specific tools
# tool_filter=['firecrawl_scrape', 'firecrawl_batch_scrape', 'firecrawl_search', 'firecrawl_map']
)
],
)
# Export the agent for use with ADK web
__all__ = ['root_agent']

# Example usage in a script.
# NOTE: this only prints setup guidance; it does not run the agent itself —
# the agent is served via `adk web`.
if __name__ == "__main__":
    print("🔥 Firecrawl MCP Agent initialized!")
    print("\n🔧 Available Capabilities:")
    print("- Single page scraping with advanced options")
    print("- Batch processing of multiple URLs")
    print("- Website mapping and URL discovery")
    print("- Web search with content extraction")
    print("- Comprehensive website crawling")
    print("- AI-powered structured data extraction")
    print("- Deep research with multi-source analysis")
    print("- LLMs.txt generation for domains")
    print("\n🚀 To use this agent:")
    print("1. Set your Firecrawl API key: export FIRECRAWL_API_KEY=your_api_key")
    print("2. Run 'adk web' from the tutorials root directory")
    print("3. Select 'firecrawl_mcp_agent' from the dropdown")
    print("\n💡 Example commands to try:")
    print(" - 'Scrape the homepage of https://example.com'")
    print(" - 'Find all blog post URLs on https://blog.example.com'")
    print(" - 'Search for recent AI research papers and extract key information'")
    print(" - 'Extract product details from this e-commerce page: [URL]'")
    print(" - 'Perform deep research on sustainable energy technologies'")
    print(" - 'Crawl the documentation section of https://docs.example.com'")
    print("\n⚠️ Important Setup:")
    print("- Requires Node.js for the Firecrawl MCP server")
    print("- Requires a valid Firecrawl API key (get one at https://firecrawl.dev)")
    print("- Some operations may take time for large datasets")
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_4_mcp_tools/filesystem_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_4_mcp_tools/filesystem_agent/__init__.py | from . import agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_4_mcp_tools/filesystem_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_4_mcp_tools/filesystem_agent/agent.py | """
Filesystem Agent - MCP Tools Integration Example
This example demonstrates how to connect an ADK agent to a filesystem MCP server
using the MCPToolset. The agent can perform file operations like reading, writing,
and listing files through the MCP protocol.
"""
import os
from google.adk.agents import LlmAgent
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset, StdioServerParameters
# NOTE(review): despite the original comment, this is NOT a temporary
# directory — ".." resolves to the PARENT directory of this package, and the
# sample file below is (re)written there as an import-time side effect.
# In a real application, you would use a specific, dedicated folder path.
DEMO_FOLDER = os.path.join(os.path.dirname(__file__), "..")

# Ensure the demo folder exists
os.makedirs(DEMO_FOLDER, exist_ok=True)

# Create a sample file for demonstration (overwritten on every import)
sample_file_path = os.path.join(DEMO_FOLDER, "sample.txt")
with open(sample_file_path, "w") as f:
    f.write("This is a sample file for the MCP filesystem agent demonstration.\n")
    f.write("You can read, write, and list files using MCP tools.\n")
# Create the ADK agent with MCP filesystem tools
root_agent = LlmAgent(
model='gemini-3-flash-preview',
name='filesystem_mcp_agent',
instruction=f"""
You are a helpful filesystem assistant that can help users manage their files.
You have access to filesystem tools through the Model Context Protocol (MCP).
You can:
- List files and directories
- Read file contents
- Write to files
- Create directories
The current working directory is: {DEMO_FOLDER}
Always be helpful and explain what you're doing when performing file operations.
If a user asks about files, use the available tools to check the filesystem.
""",
tools=[
MCPToolset(
connection_params=StdioServerParameters(
command='npx',
args=[
"-y", # Auto-confirm npm package installation
"@modelcontextprotocol/server-filesystem",
DEMO_FOLDER, # The directory path the MCP server can access
],
),
# Optional: Filter which tools from the MCP server to expose
# tool_filter=['list_directory', 'read_file', 'write_file']
)
],
)
# Export the agent for use with ADK web
__all__ = ['root_agent']

# Example usage in a script.
# NOTE: prints usage guidance only; the agent itself is served via `adk web`.
if __name__ == "__main__":
    print(f"Filesystem MCP Agent initialized!")
    print(f"Demo folder: {DEMO_FOLDER}")
    print(f"Sample file created at: {sample_file_path}")
    print("\nTo use this agent:")
    print("1. Run 'adk web' from the tutorials root directory")
    print("2. Select 'filesystem_mcp_agent' from the dropdown")
    print("3. Try commands like:")
    print(" - 'List files in the current directory'")
    print(" - 'Read the contents of sample.txt'")
    print(" - 'Create a new file called hello.txt with the content Hello World!'")
    print(" - 'Show me all text files in the directory'")
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_1_builtin_tools/code_exec_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_1_builtin_tools/code_exec_agent/__init__.py | from . import agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_1_builtin_tools/code_exec_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_1_builtin_tools/code_exec_agent/agent.py | from google.adk.agents import LlmAgent
from google.adk.code_executors import BuiltInCodeExecutor
# Create a code execution agent using Google ADK's built-in Code Execution Tool.
# NOTE(review): per the instruction text below, model-generated code runs in a
# sandbox provided by the executor, not in this process — confirm against the
# BuiltInCodeExecutor documentation for the deployed ADK version.
root_agent = LlmAgent(
    name="code_exec_agent",
    model="gemini-3-flash-preview",
    description="A computational agent that can execute Python code safely",
    instruction="""
    You are a computational assistant with the ability to execute Python code safely.
    Your role is to help users with:
    - Mathematical calculations and computations
    - Data analysis and processing
    - Algorithm implementation and testing
    - Code debugging and verification
    - Data visualization and charting
    Key capabilities:
    - Execute Python code in a secure sandbox environment
    - Perform complex mathematical calculations
    - Process and analyze data
    - Create visualizations and charts
    - Test algorithms and logic
    When users request computational tasks:
    1. Write appropriate Python code to solve the problem
    2. Execute the code using the code execution tool
    3. Explain the results and any insights
    4. Provide the code used for transparency
    Examples of tasks you can handle:
    - "Calculate the compound interest for $1000 at 5% for 10 years"
    - "Sort this list of numbers: [64, 34, 25, 12, 22, 11, 90]"
    - "Create a simple visualization of sales data"
    - "Find the prime numbers between 1 and 100"
    - "Calculate the Fibonacci sequence up to 20 terms"
    Always:
    - Show the code you're executing
    - Explain the logic and approach
    - Interpret results for the user
    - Handle errors gracefully and suggest fixes
    Safety note: Code execution happens in a secure sandbox environment.
    """,
    # Attached as code_executor (not a tool): the executor runs the Python
    # snippets the model emits.
    code_executor=BuiltInCodeExecutor()
)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_1_builtin_tools/search_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_1_builtin_tools/search_agent/__init__.py | from . import agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_1_builtin_tools/search_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_1_builtin_tools/search_agent/agent.py | from google.adk.agents import LlmAgent
from google.adk.tools import google_search
# Create a web search agent using Google ADK's built-in Search Tool.
# `google_search` is the prebuilt tool imported above; no API key handling
# happens in this file.
root_agent = LlmAgent(
    name="search_agent",
    model="gemini-3-flash-preview",
    description="A research agent that can search the web for real-time information",
    instruction="""
    You are a research assistant with access to real-time web search capabilities.
    Your role is to help users find current, accurate information from the web.
    Key capabilities:
    - Search the web for recent news, facts, and information
    - Provide accurate, up-to-date responses based on search results
    - Cite sources when presenting information
    - Clarify when information might be outdated or uncertain
    When users ask for information:
    1. Use the search tool to find relevant, current information
    2. Synthesize the search results into a clear, comprehensive response
    3. Include source links when possible
    4. Mention if the information is from a specific time period
    Examples of queries you can handle:
    - "What's the latest news about artificial intelligence?"
    - "Current stock price of Tesla"
    - "Recent developments in renewable energy"
    - "Today's weather in San Francisco"
    - "Latest updates on space exploration"
    Always prioritize accuracy and recency of information. If search results are
    conflicting, present multiple perspectives and mention the discrepancy.
    """,
    tools=[google_search]
)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/utility_agent/tools.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/utility_agent/tools.py | import json
import re
import uuid
from datetime import datetime, timedelta
from typing import Dict, Union, List
from urllib.parse import urlparse
import hashlib
import base64
def _word_frequency(text: str) -> Dict[str, int]:
    """Case-insensitive word counts for *text*, most frequent first.

    Single O(n) pass. (The previous implementation re-scanned the whole
    text with ``list.count`` for every distinct word, which was O(n^2).)
    """
    counts: Dict[str, int] = {}
    for word in text.lower().split():
        counts[word] = counts.get(word, 0) + 1
    return dict(sorted(counts.items(), key=lambda item: item[1], reverse=True))


def process_text(text: str, operation: str) -> Dict[str, Union[str, int]]:
    """
    Process text with various operations like counting, formatting, and transforming.
    Use this function when users need text processing, formatting, or analysis.
    Available operations: count_words, count_chars, uppercase, lowercase, title_case,
    reverse, remove_spaces, extract_emails, extract_urls, word_frequency.
    Args:
        text: Input text to process
        operation: Type of operation to perform
    Returns:
        Dictionary with processed text results. Always contains "status";
        on success it also echoes "operation", on failure an "error" message.
    """
    try:
        if not text:
            return {"error": "Empty text provided", "status": "error"}
        # Dispatch table: each operation maps to a function building the
        # result dict for that operation.
        operations = {
            "count_words": lambda t: {"word_count": len(t.split()), "text": t},
            "count_chars": lambda t: {"char_count": len(t), "char_count_no_spaces": len(t.replace(" ", "")), "text": t},
            "uppercase": lambda t: {"result": t.upper(), "original": t},
            "lowercase": lambda t: {"result": t.lower(), "original": t},
            "title_case": lambda t: {"result": t.title(), "original": t},
            "reverse": lambda t: {"result": t[::-1], "original": t},
            "remove_spaces": lambda t: {"result": re.sub(r'\s+', '', t), "original": t},
            # Bug fix: the TLD character class was [A-Z|a-z], which also
            # matched a literal '|' inside the top-level domain.
            "extract_emails": lambda t: {"emails": re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b', t), "original": t},
            # NOTE(review): this URL pattern is very permissive ('$-_' is a
            # character *range*, not three characters); kept as-is to
            # preserve existing matching behavior.
            "extract_urls": lambda t: {"urls": re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', t), "original": t},
            "word_frequency": lambda t: {"word_frequency": _word_frequency(t), "original": t}
        }
        if operation not in operations:
            return {
                "error": f"Invalid operation. Available: {', '.join(operations.keys())}",
                "status": "error"
            }
        result = operations[operation](text)
        result["operation"] = operation
        result["status"] = "success"
        return result
    except Exception as e:
        return {
            "error": f"Error processing text: {str(e)}",
            "status": "error"
        }
def format_datetime(date_input: str, input_format: str, output_format: str) -> Dict[str, Union[str, Dict]]:
    """
    Convert a date string from one strftime format to another.

    Useful for parsing dates and re-rendering them in a different layout.
    Common formats: '%Y-%m-%d', '%d/%m/%Y', '%Y-%m-%d %H:%M:%S', '%B %d, %Y'.

    Args:
        date_input: Input date string
        input_format: Format of the input date (Python strftime format)
        output_format: Desired output format (Python strftime format)

    Returns:
        Dictionary with the re-formatted date plus a breakdown of the
        parsed components, or an error dict with status "error".
    """
    try:
        moment = datetime.strptime(date_input, input_format)
        rendered = moment.strftime(output_format)
        return {
            "formatted_date": rendered,
            "original": date_input,
            "input_format": input_format,
            "output_format": output_format,
            "parsed_info": {
                "year": moment.year,
                "month": moment.month,
                "day": moment.day,
                "weekday": moment.strftime("%A"),
                "month_name": moment.strftime("%B"),
            },
            "status": "success",
        }
    except ValueError as exc:
        # strptime raises ValueError when the string doesn't match the format.
        return {
            "error": f"Date parsing error: {exc}",
            "date_input": date_input,
            "input_format": input_format,
            "status": "error",
        }
    except Exception as exc:
        return {
            "error": f"Error formatting date: {exc}",
            "status": "error",
        }
def calculate_date_difference(date1: str, date2: str, date_format: str) -> Dict[str, Union[str, int, Dict]]:
    """
    Compute the difference ``date2 - date1`` in several units.

    The years/months/days breakdown is an approximation using 365-day
    years and 30-day months, so it only roughly matches the calendar.

    Args:
        date1: First date string
        date2: Second date string
        date_format: Format of both dates (Python strftime format)

    Returns:
        Dictionary with totals (days/hours/minutes/seconds) and an
        approximate breakdown, or an error dict with status "error".
    """
    try:
        start = datetime.strptime(date1, date_format)
        end = datetime.strptime(date2, date_format)
        delta = end - start

        seconds_total = int(delta.total_seconds())
        day_count = delta.days

        # Approximate breakdown; divmod keeps the same floor semantics as
        # the //, % pair.
        approx_years, leftover = divmod(day_count, 365)
        approx_months, leftover = divmod(leftover, 30)

        return {
            "difference": {
                "total_days": day_count,
                "total_hours": seconds_total // 3600,
                "total_minutes": seconds_total // 60,
                "total_seconds": seconds_total,
                "breakdown": {
                    "years": approx_years,
                    "months": approx_months,
                    "days": leftover,
                },
            },
            "date1": date1,
            "date2": date2,
            "date_format": date_format,
            "status": "success",
        }
    except ValueError as exc:
        return {
            "error": f"Date parsing error: {exc}",
            "date1": date1,
            "date2": date2,
            "status": "error",
        }
    except Exception as exc:
        return {
            "error": f"Error calculating date difference: {exc}",
            "status": "error",
        }
def generate_uuid(version: int = 4) -> Dict[str, Union[str, int]]:
    """
    Generate a UUID (Universally Unique Identifier) of the given version.

    Args:
        version: UUID version (1, 4, or 5). Default is 4 (random)

    Returns:
        Dictionary with the generated UUID string, or an error dict for an
        unsupported version.
    """
    try:
        # Dispatch table keyed by version number.
        makers = {
            1: lambda: str(uuid.uuid1()),
            4: lambda: str(uuid.uuid4()),
            # v5 is name-based; with this fixed namespace/name pair the
            # result is deterministic.
            5: lambda: str(uuid.uuid5(uuid.NAMESPACE_DNS, 'example.com')),
        }
        if version not in makers:
            return {
                "error": "Invalid UUID version. Use 1, 4, or 5",
                "status": "error",
            }
        return {
            "uuid": makers[version](),
            "version": version,
            "format": "8-4-4-4-12 hexadecimal digits",
            "status": "success",
        }
    except Exception as exc:
        return {
            "error": f"Error generating UUID: {exc}",
            "status": "error",
        }
def hash_text(text: str, algorithm: str = "sha256") -> Dict[str, Union[str, Dict]]:
    """
    Return the hexadecimal digest of *text* under the chosen hash algorithm.

    Args:
        text: Text to hash
        algorithm: Hash algorithm (md5, sha1, sha256, sha512)

    Returns:
        Dictionary with the hex digest and length metadata, or an error
        dict for empty input / unknown algorithm.
    """
    try:
        if not text:
            return {"error": "Empty text provided", "status": "error"}

        supported = ("md5", "sha1", "sha256", "sha512")
        if algorithm not in supported:
            return {
                "error": f"Invalid algorithm. Available: {', '.join(supported)}",
                "status": "error",
            }

        # hashlib.new accepts the algorithm name and initial data directly.
        digest = hashlib.new(algorithm, text.encode('utf-8')).hexdigest()
        return {
            "hash": digest,
            "algorithm": algorithm,
            "text_length": len(text),
            "hash_length": len(digest),
            "status": "success",
        }
    except Exception as exc:
        return {
            "error": f"Error hashing text: {exc}",
            "status": "error",
        }
def encode_decode_base64(text: str, operation: str) -> Dict[str, Union[str, int]]:
    """
    Encode text to Base64 or decode a Base64 string back to UTF-8 text.

    Args:
        text: Text to encode/decode
        operation: 'encode' to encode, 'decode' to decode

    Returns:
        Dictionary with the transformed text and length metadata, or an
        error dict (empty input, unknown operation, invalid Base64).
    """
    try:
        if not text:
            return {"error": "Empty text provided", "status": "error"}
        if operation not in ("encode", "decode"):
            return {
                "error": "Invalid operation. Use 'encode' or 'decode'",
                "status": "error",
            }

        if operation == "encode":
            transformed = base64.b64encode(text.encode('utf-8')).decode('utf-8')
        else:
            try:
                transformed = base64.b64decode(text).decode('utf-8')
            except Exception as bad_input:
                return {
                    "error": f"Invalid Base64 string: {bad_input}",
                    "operation": "decode",
                    "status": "error",
                }

        return {
            "result": transformed,
            "operation": operation,
            "original": text,
            "original_length": len(text),
            "result_length": len(transformed),
            "status": "success",
        }
    except Exception as exc:
        return {
            "error": f"Error in Base64 operation: {exc}",
            "status": "error",
        }
def validate_url(url: str) -> Dict[str, Union[str, bool, Dict]]:
    """
    Validate and parse URL components.

    A scheme-less input (e.g. "example.com") is normalized by prepending
    "https://" before parsing.

    Args:
        url: URL to validate and parse

    Returns:
        Dictionary with a validity flag, the caller's original URL, the
        normalized URL that was actually parsed, and the parsed components;
        or an error dict with status "error".
    """
    try:
        if not url:
            return {"error": "Empty URL provided", "status": "error"}

        # Bug fix: the input was previously mutated in place and then
        # returned under the key "original_url". Keep the caller's input
        # intact and parse a normalized copy instead.
        normalized = url if url.startswith(('http://', 'https://')) else 'https://' + url

        parsed = urlparse(normalized)

        # Considered valid when both a scheme and a network location exist.
        is_valid = bool(parsed.netloc and parsed.scheme)

        return {
            "is_valid": is_valid,
            "original_url": url,
            "normalized_url": normalized,
            "components": {
                "scheme": parsed.scheme,
                "netloc": parsed.netloc,
                "domain": parsed.netloc.split(':')[0],
                "path": parsed.path,
                "params": parsed.params,
                "query": parsed.query,
                "fragment": parsed.fragment,
                # .port may raise ValueError on a malformed port; that is
                # caught by the handler below.
                "port": parsed.port
            },
            "status": "success",
        }
    except Exception as e:
        return {
            "error": f"Error validating URL: {str(e)}",
            "url": url,
            "status": "error",
        }
def format_json(json_string: str, indent: int = 2) -> Dict[str, Union[str, Dict]]:
    """
    Validate a JSON string and return pretty-printed and minified forms.

    Args:
        json_string: JSON string to format
        indent: Number of spaces for indentation

    Returns:
        Dictionary with formatted/minified JSON and size statistics, or an
        error dict (empty input or invalid JSON).
    """
    try:
        if not json_string:
            return {"error": "Empty JSON string provided", "status": "error"}

        # Parsing both validates the input and yields the object to re-dump.
        data = json.loads(json_string)

        pretty = json.dumps(data, indent=indent, ensure_ascii=False)
        compact = json.dumps(data, separators=(',', ':'))

        return {
            "formatted_json": pretty,
            "minified_json": compact,
            "is_valid": True,
            "statistics": {
                "original_length": len(json_string),
                "formatted_length": len(pretty),
                "minified_length": len(compact),
                "indent_spaces": indent,
            },
            "status": "success",
        }
    except json.JSONDecodeError as decode_err:
        return {
            "error": f"Invalid JSON: {decode_err}",
            "is_valid": False,
            "original": json_string,
            "status": "error",
        }
    except Exception as exc:
        return {
            "error": f"Error formatting JSON: {exc}",
            "status": "error",
        }
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/utility_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/utility_agent/__init__.py | # Utility Agent Module | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/utility_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/utility_agent/agent.py | from google.adk.agents import LlmAgent
from .tools import (
process_text,
format_datetime,
calculate_date_difference,
generate_uuid,
hash_text,
encode_decode_base64,
validate_url,
format_json
)
# Create a utility agent with various utility tools.
# The entries in `tools=` are the plain Python functions imported from
# .tools above; each function's docstring describes when it should be used.
root_agent = LlmAgent(
    name="utility_agent",
    model="gemini-3-flash-preview",
    description="A comprehensive utility agent with text processing, date/time, and data formatting capabilities",
    instruction="""
    You are a utility assistant with access to various utility tools for text processing,
    date/time operations, data formatting, and general utility functions.
    You can help users with:
    **Text Processing:**
    - Word and character counting
    - Case conversions (uppercase, lowercase, title case)
    - Text transformations (reverse, remove spaces)
    - Extract emails and URLs from text
    - Word frequency analysis
    **Date and Time Operations:**
    - Convert between different date formats
    - Calculate differences between dates
    - Parse and format dates
    - Age calculations and duration analysis
    **Data Utilities:**
    - Generate UUIDs for unique identifiers
    - Hash text with various algorithms (MD5, SHA1, SHA256, SHA512)
    - Base64 encoding and decoding
    - URL validation and parsing
    - JSON formatting and validation
    **Available Tools:**
    - `process_text`: Text processing and analysis operations
    - `format_datetime`: Convert between date/time formats
    - `calculate_date_difference`: Find differences between dates
    - `generate_uuid`: Generate unique identifiers
    - `hash_text`: Generate hash values for text
    - `encode_decode_base64`: Base64 encoding/decoding
    - `validate_url`: URL validation and parsing
    - `format_json`: JSON formatting and validation
    **Guidelines:**
    1. Always use the appropriate tool for each task
    2. Explain what tool you're using and why
    3. Present results clearly with context
    4. Handle errors gracefully and suggest alternatives
    5. Provide helpful explanations for complex operations
    6. Show examples when helpful
    **Example interactions:**
    - "Count words in this text: 'Hello world!'" → Use process_text with count_words
    - "Convert 2023-12-25 to December 25, 2023" → Use format_datetime
    - "How many days between 2023-01-01 and 2023-12-31?" → Use calculate_date_difference
    - "Generate a UUID" → Use generate_uuid
    - "Hash this password: 'mypassword'" → Use hash_text
    - "Encode this text in Base64: 'Hello World'" → Use encode_decode_base64
    - "Validate this URL: example.com" → Use validate_url
    - "Format this JSON: {'name':'John','age':30}" → Use format_json
    **Text Processing Operations:**
    - count_words: Count words in text
    - count_chars: Count characters (with/without spaces)
    - uppercase/lowercase/title_case: Change text case
    - reverse: Reverse text
    - remove_spaces: Remove all spaces
    - extract_emails: Find email addresses
    - extract_urls: Find URLs
    - word_frequency: Analyze word frequency
    **Date Format Examples:**
    - '%Y-%m-%d': 2023-12-25
    - '%d/%m/%Y': 25/12/2023
    - '%B %d, %Y': December 25, 2023
    - '%Y-%m-%d %H:%M:%S': 2023-12-25 15:30:45
    **Hash Algorithms:**
    - md5: Fast, 128-bit (not secure for passwords)
    - sha1: 160-bit (legacy, not recommended)
    - sha256: 256-bit (recommended)
    - sha512: 512-bit (most secure)
    Always be helpful, accurate, and provide clear explanations for your operations.
    """,
    tools=[
        process_text,
        format_datetime,
        calculate_date_difference,
        generate_uuid,
        hash_text,
        encode_decode_base64,
        validate_url,
        format_json
    ]
)
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/calculator_agent/tools.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/calculator_agent/tools.py | import math
from typing import Dict, Union, List
def calculate_basic_math(expression: str) -> Dict[str, Union[float, str]]:
    """
    Calculate basic mathematical expressions safely.

    Use this function when users ask for basic arithmetic calculations
    like addition, subtraction, multiplication, division, or expressions
    with parentheses.

    Args:
        expression: A mathematical expression as a string (e.g., "2 + 3 * 4")

    Returns:
        Dictionary containing the result and operation details
    """
    import ast
    import operator

    # Whitelisted operators. Evaluating the parsed AST against this table
    # replaces eval(): the old character filter still admitted "**", so an
    # input like "9**9**9" could hang the process inside eval().
    binary_ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.Pow: operator.pow,
    }
    unary_ops = {
        ast.UAdd: operator.pos,
        ast.USub: operator.neg,
    }

    def _eval_node(node):
        """Recursively evaluate a whitelisted arithmetic AST node."""
        if isinstance(node, ast.Expression):
            return _eval_node(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in binary_ops:
            left = _eval_node(node.left)
            right = _eval_node(node.right)
            # Cap exponent size so "**" stays usable without enabling DoS.
            if isinstance(node.op, ast.Pow) and abs(right) > 100:
                raise ValueError("Exponent too large")
            return binary_ops[type(node.op)](left, right)
        if isinstance(node, ast.UnaryOp) and type(node.op) in unary_ops:
            return unary_ops[type(node.op)](_eval_node(node.operand))
        raise ValueError(f"Unsupported operation: {type(node).__name__}")

    try:
        # Keep only characters that can appear in a basic arithmetic expression.
        allowed_chars = "0123456789+-*/.() "
        safe_expression = ''.join(c for c in expression if c in allowed_chars)

        if not safe_expression.strip():
            return {
                "error": "Empty or invalid expression",
                "status": "error"
            }

        # Parse and evaluate via the AST whitelist instead of eval().
        result = _eval_node(ast.parse(safe_expression, mode="eval"))

        return {
            "result": float(result),
            "expression": expression,
            "safe_expression": safe_expression,
            "status": "success"
        }
    except ZeroDivisionError:
        return {
            "error": "Division by zero",
            "expression": expression,
            "status": "error"
        }
    except Exception as e:
        return {
            "error": f"Error calculating expression: {str(e)}",
            "expression": expression,
            "status": "error"
        }
def convert_temperature(temperature: float, from_unit: str, to_unit: str) -> Dict[str, Union[float, str, Dict]]:
    """
    Convert temperature between Celsius, Fahrenheit, and Kelvin.

    Use this function when users ask to convert temperatures between
    different units (C, F, K).

    Args:
        temperature: Temperature value to convert
        from_unit: Source unit ('C', 'F', 'K')
        to_unit: Target unit ('C', 'F', 'K')

    Returns:
        Dictionary with conversion results
    """
    try:
        src = from_unit.upper()
        dst = to_unit.upper()

        # Celsius is the pivot unit: convert in, then out.
        to_celsius = {
            'C': lambda t: t,
            'F': lambda t: (t - 32) * 5 / 9,
            'K': lambda t: t - 273.15,
        }
        from_celsius = {
            'C': lambda c: c,
            'F': lambda c: c * 9 / 5 + 32,
            'K': lambda c: c + 273.15,
        }

        if src not in to_celsius or dst not in from_celsius:
            return {
                "error": f"Invalid units. Use C, F, or K. Got: {src} to {dst}",
                "status": "error"
            }

        converted = from_celsius[dst](to_celsius[src](temperature))
        rounded = round(converted, 2)

        return {
            "result": rounded,
            "conversion": {
                "from": {"value": temperature, "unit": src},
                "to": {"value": rounded, "unit": dst}
            },
            "status": "success"
        }
    except Exception as e:
        return {
            "error": f"Error converting temperature: {str(e)}",
            "status": "error"
        }
def calculate_compound_interest(principal: float, rate: float, years: int, compound_frequency: int = 1) -> Dict[str, Union[float, str, Dict]]:
    """
    Calculate compound interest for an investment.

    Use this function when users ask about investment growth,
    compound interest calculations, or future value of investments.

    Args:
        principal: Initial investment amount
        rate: Annual interest rate (as decimal, e.g., 0.05 for 5%)
        years: Number of years to compound
        compound_frequency: How many times per year to compound (default: 1)

    Returns:
        Dictionary with calculation results and breakdown
    """
    try:
        # Validate inputs with a single guard-clause table.
        validations = [
            (principal <= 0, "Principal must be positive"),
            (rate < 0, "Interest rate cannot be negative"),
            (years <= 0, "Years must be positive"),
            (compound_frequency <= 0, "Compound frequency must be positive"),
        ]
        for failed, message in validations:
            if failed:
                return {"error": message, "status": "error"}

        # Compound interest formula: A = P(1 + r/n)^(nt)
        final_amount = principal * (1 + rate / compound_frequency) ** (compound_frequency * years)
        total_interest = final_amount - principal

        return {
            "final_amount": round(final_amount, 2),
            "total_interest": round(total_interest, 2),
            "calculation_details": {
                "principal": principal,
                "annual_rate": rate,
                "rate_percentage": f"{rate * 100}%",
                "years": years,
                "compound_frequency": compound_frequency,
                "formula": "A = P(1 + r/n)^(nt)"
            },
            "status": "success"
        }
    except Exception as e:
        return {
            "error": f"Error calculating compound interest: {str(e)}",
            "status": "error"
        }
def calculate_percentage(value: float, total: float) -> Dict[str, Union[float, str]]:
    """
    Calculate what percentage one value is of another.

    Use this function when users ask to calculate percentages,
    such as "what percentage is 25 of 100?"

    Args:
        value: The value to calculate percentage for
        total: The total value (100% reference)

    Returns:
        Dictionary with percentage calculation results
    """
    try:
        # Division by a zero total is reported, not raised.
        if total == 0:
            return {
                "error": "Cannot calculate percentage when total is zero",
                "status": "error"
            }

        ratio_pct = round(value / total * 100, 2)

        return {
            "percentage": ratio_pct,
            "calculation": {
                "value": value,
                "total": total,
                "formula": f"({value} / {total}) * 100"
            },
            "formatted": f"{ratio_pct}%",
            "status": "success"
        }
    except Exception as e:
        return {
            "error": f"Error calculating percentage: {str(e)}",
            "status": "error"
        }
def calculate_statistics(numbers: List[float]) -> Dict[str, Union[float, str, int]]:
    """
    Calculate basic statistics for a list of numbers.

    Use this function when users ask for statistical analysis
    of a set of numbers (mean, median, mode, etc.).

    Args:
        numbers: List of numbers to analyze

    Returns:
        Dictionary with statistical results
    """
    try:
        if not numbers:
            return {"error": "Cannot calculate statistics for empty list", "status": "error"}

        # Coerce everything to float up front; reject non-numeric input.
        try:
            values = [float(v) for v in numbers]
        except (ValueError, TypeError):
            return {"error": "All values must be numbers", "status": "error"}

        count = len(values)
        average = sum(values) / count

        # Median: middle element, or mean of the two middle elements.
        ordered = sorted(values)
        mid = count // 2
        median = ordered[mid] if count % 2 else (ordered[mid - 1] + ordered[mid]) / 2

        # Mode: most frequent value(s); unwrapped when there is a single mode.
        from collections import Counter
        frequency = Counter(values)
        top = max(frequency.values())
        modes = [v for v, occurrences in frequency.items() if occurrences == top]

        # Population standard deviation (divides by N, not N-1).
        spread = math.sqrt(sum((v - average) ** 2 for v in values) / count)

        lowest, highest = min(values), max(values)

        return {
            "count": count,
            "mean": round(average, 2),
            "median": round(median, 2),
            "mode": modes[0] if len(modes) == 1 else modes,
            "min": lowest,
            "max": highest,
            "range": highest - lowest,
            "standard_deviation": round(spread, 2),
            "sum": sum(values),
            "status": "success"
        }
    except Exception as e:
        return {
            "error": f"Error calculating statistics: {str(e)}",
            "status": "error"
        }
def round_number(number: float, decimal_places: int = 2) -> Dict[str, Union[float, str]]:
    """
    Round a number to specified decimal places.

    Use this function when users ask to round numbers to specific
    decimal places or need cleaner number formatting.

    Args:
        number: Number to round
        decimal_places: Number of decimal places (default: 2)

    Returns:
        Dictionary with rounded number and details
    """
    try:
        # Negative precision is rejected rather than interpreted as
        # rounding to tens/hundreds.
        if decimal_places < 0:
            return {"error": "Decimal places cannot be negative", "status": "error"}

        return {
            "rounded_number": round(number, decimal_places),
            "original_number": number,
            "decimal_places": decimal_places,
            "status": "success"
        }
    except Exception as e:
        return {
            "error": f"Error rounding number: {str(e)}",
            "status": "error"
        }
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/calculator_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/calculator_agent/__init__.py | from . import agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/calculator_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/4_tool_using_agent/4_2_function_tools/calculator_agent/agent.py | from google.adk.agents import LlmAgent
from .tools import (
calculate_basic_math,
convert_temperature,
calculate_compound_interest,
calculate_percentage,
calculate_statistics,
round_number
)
# Create a calculator agent with custom function tools
root_agent = LlmAgent(
name="calculator_agent",
model="gemini-3-flash-preview",
description="A comprehensive calculator agent with mathematical and statistical capabilities",
instruction="""
You are a smart calculator assistant with access to various mathematical tools.
You can help users with:
**Basic Mathematics:**
- Arithmetic calculations (addition, subtraction, multiplication, division)
- Mathematical expressions with parentheses
- Order of operations (PEMDAS/BODMAS)
**Conversions:**
- Temperature conversions (Celsius, Fahrenheit, Kelvin)
- Unit conversions and formatting
**Financial Calculations:**
- Compound interest calculations
- Investment growth projections
- Percentage calculations
**Statistics:**
- Mean, median, mode calculations
- Standard deviation and variance
- Min, max, range, and sum
**Utilities:**
- Number rounding to specified decimal places
- Data formatting and presentation
**Available Tools:**
- `calculate_basic_math`: For arithmetic expressions
- `convert_temperature`: For temperature unit conversions
- `calculate_compound_interest`: For investment calculations
- `calculate_percentage`: For percentage calculations
- `calculate_statistics`: For statistical analysis
- `round_number`: For number rounding
**Guidelines:**
1. Always use the appropriate tool for calculations
2. Explain your approach and the tool you're using
3. Present results clearly with context
4. Handle errors gracefully and suggest alternatives
5. Show the formula or method when helpful
6. Provide detailed breakdowns for complex calculations
**Example interactions:**
- "Calculate 15% of 200" → Use calculate_percentage
- "What's 25 * 4 + 10?" → Use calculate_basic_math
- "Convert 100°F to Celsius" → Use convert_temperature
- "Find mean of [1,2,3,4,5]" → Use calculate_statistics
- "Compound interest on $1000 at 5% for 10 years" → Use calculate_compound_interest
Always be helpful, accurate, and educational in your responses.
""",
tools=[
calculate_basic_math,
convert_temperature,
calculate_compound_interest,
calculate_percentage,
calculate_statistics,
round_number
]
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/3_structured_output_agent/3_2_email_agent/email_generator_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/3_structured_output_agent/3_2_email_agent/email_generator_agent/__init__.py | from . import agent
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/3_structured_output_agent/3_2_email_agent/email_generator_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/3_structured_output_agent/3_2_email_agent/email_generator_agent/agent.py | from google.adk.agents import LlmAgent
from pydantic import BaseModel, Field
class EmailContent(BaseModel):
    """Schema for email content with subject and body.

    Used below as the agent's ``output_schema`` so the model's response is
    validated into this structure (surfaced under the ``generated_email`` key).
    """
    # Subject line of the email; the Field description documents the
    # expected content for schema consumers.
    subject: str = Field(
        description="The subject line of the email. Should be concise and descriptive."
    )
    # Main message text, expected to carry greeting, paragraphs, and signature.
    body: str = Field(
        description="The main content of the email. Should be well-formatted with proper greeting, paragraphs, and signature."
    )
root_agent = LlmAgent(
name="email_generator_agent",
model="gemini-3-flash-preview",
description="Professional email generator that creates structured email content",
instruction="""
You are a professional email writing assistant.
IMPORTANT: Your response must be a JSON object with exactly these fields:
- "subject": A concise, relevant subject line
- "body": Well-formatted email content with greeting, main content, and closing
Format your response as valid JSON only.
""",
output_schema=EmailContent, # This is where the magic happens
output_key="generated_email"
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/3_structured_output_agent/3_1_customer_support_ticket_agent/customer_support_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/3_structured_output_agent/3_1_customer_support_ticket_agent/customer_support_agent/__init__.py | from . import agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/3_structured_output_agent/3_1_customer_support_ticket_agent/customer_support_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/3_structured_output_agent/3_1_customer_support_ticket_agent/customer_support_agent/agent.py | from typing import List, Optional
from enum import Enum
from google.adk.agents import LlmAgent
from pydantic import BaseModel, Field
class Priority(str, Enum):
    """Closed set of ticket priority levels.

    The ``str`` mixin makes members compare equal to their lowercase
    string values, matching the JSON the agent is instructed to emit.
    """
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
class SupportTicket(BaseModel):
    """Structured support ticket; used below as the agent's ``output_schema``."""
    # Concise summary of the reported issue.
    title: str = Field(description="A concise summary of the issue")
    # Full narrative of the problem as reported.
    description: str = Field(description="Detailed description of the problem")
    # One of the Priority enum values (low/medium/high/critical).
    priority: Priority = Field(description="The ticket priority level")
    # Free-form department label (e.g. "Technical", "Billing").
    category: str = Field(description="The department this ticket belongs to")
    # Optional repro steps; None for non-technical tickets.
    steps_to_reproduce: Optional[List[str]] = Field(
        description="Steps to reproduce the issue (for technical problems)",
        default=None
    )
    # Human-readable estimate such as "2-4 hours" or "1-2 days".
    estimated_resolution_time: str = Field(
        description="Estimated time to resolve this issue"
    )
root_agent = LlmAgent(
name="customer_support_agent",
model="gemini-3-flash-preview",
description="Creates structured support tickets from user reports",
instruction="""
You are a support ticket creation assistant.
Based on user problem descriptions, create well-structured support tickets with appropriate priority levels, categories, and resolution estimates.
IMPORTANT: Response must be valid JSON matching the SupportTicket schema with these fields:
- "title": Concise summary of the issue
- "description": Detailed problem description
- "priority": One of "low", "medium", "high", or "critical"
- "category": Department (e.g., "Technical", "Billing", "Account", "Product")
- "steps_to_reproduce": List of steps (for technical issues) or null
- "estimated_resolution_time": Estimated resolution time (e.g., "2-4 hours", "1-2 days")
Format your response as valid JSON only.
""",
output_schema=SupportTicket,
output_key="support_ticket"
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_1_sequential_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_1_sequential_agent/agent.py | import os
import asyncio
import inspect
from dotenv import load_dotenv
from google.adk.agents import LlmAgent, SequentialAgent
from google.adk.tools import google_search
from google.adk.tools.agent_tool import AgentTool
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
from google.genai import types
# Load environment variables
load_dotenv()
# --- Search Agent (Wrapped as AgentTool) ---
search_agent = LlmAgent(
name="search_agent",
model="gemini-3-flash-preview",
description="Conducts web search for current market information and competitive analysis",
instruction=(
"You are a web search specialist. When given a business topic:\n"
"1. Use web search to find current market information\n"
"2. Identify key competitors and their market position\n"
"3. Gather recent industry trends and market data\n"
"4. Find market size estimates and growth projections\n"
"5. Provide comprehensive, up-to-date market analysis\n\n"
"Always use web search to get the most current information available."
),
tools=[google_search]
)
# --- Simple Sub-agents ---
market_researcher = LlmAgent(
name="market_researcher",
model="gemini-3-flash-preview",
description="Conducts market research and competitive analysis using search capabilities",
instruction=(
"You are a market research specialist. Given a business topic:\n"
"1. Use the search_agent to gather current market information\n"
"2. Identify key competitors and their market position\n"
"3. Analyze current market trends and opportunities\n"
"4. Provide industry insights and market size estimates\n"
"5. Synthesize search results into comprehensive market analysis\n\n"
"Provide a comprehensive analysis in clear, structured format based on current web research."
),
tools=[AgentTool(search_agent)]
)
swot_analyzer = LlmAgent(
name="swot_analyzer",
model="gemini-3-flash-preview",
description="Performs SWOT analysis based on market research",
instruction=(
"You are a strategic analyst. Given market research findings:\n"
"1. Identify internal strengths and competitive advantages\n"
"2. Assess internal weaknesses and limitations\n"
"3. Identify external opportunities in the market\n"
"4. Evaluate external threats and challenges\n\n"
"Provide a clear SWOT analysis with actionable insights."
)
)
strategy_formulator = LlmAgent(
name="strategy_formulator",
model="gemini-3-flash-preview",
description="Develops strategic objectives and action plans",
instruction=(
"You are a strategic planner. Given SWOT analysis results:\n"
"1. Define 3-5 key strategic objectives\n"
"2. Create specific action items for each objective\n"
"3. Recommend realistic timeline for implementation\n"
"4. Define success metrics and KPIs to track\n\n"
"Provide a clear strategic plan with actionable steps."
)
)
implementation_planner = LlmAgent(
name="implementation_planner",
model="gemini-3-flash-preview",
description="Creates detailed implementation roadmap",
instruction=(
"You are an implementation specialist. Given the strategy plan:\n"
"1. Identify required resources (human, financial, technical)\n"
"2. Define key milestones and checkpoints\n"
"3. Develop risk mitigation strategies\n"
"4. Provide final recommendations with confidence level\n\n"
"Create a practical implementation roadmap."
)
)
# --- Sequential Agent (Pure Sequential Pattern) ---
business_intelligence_team = SequentialAgent(
name="business_intelligence_team",
description="Sequentially processes business intelligence through research, analysis, strategy, and planning",
sub_agents=[
market_researcher, # Step 1: Market research (with search capabilities)
swot_analyzer, # Step 2: SWOT analysis
strategy_formulator, # Step 3: Strategy development
implementation_planner # Step 4: Implementation planning
]
)
# --- Runner Setup for Execution ---
session_service = InMemorySessionService()
runner = Runner(
agent=business_intelligence_team,
app_name="business_intelligence",
session_service=session_service
)
# --- Simple Execution Function ---
async def analyze_business_intelligence(user_id: str, business_topic: str) -> str:
    """Process business intelligence through the sequential pipeline"""
    session_id = f"bi_session_{user_id}"

    async def _resolve(maybe_awaitable):
        # Session-service calls may be sync or async depending on ADK version;
        # await only when an awaitable actually comes back.
        if inspect.isawaitable(maybe_awaitable):
            return await maybe_awaitable
        return maybe_awaitable

    # Reuse an existing session for this user, or create one seeded with
    # the topic and an empty conversation history.
    session = await _resolve(session_service.get_session(
        app_name="business_intelligence",
        user_id=user_id,
        session_id=session_id
    ))
    if not session:
        session = await _resolve(session_service.create_session(
            app_name="business_intelligence",
            user_id=user_id,
            session_id=session_id,
            state={"business_topic": business_topic, "conversation_history": []}
        ))

    # Wrap the request as user content for the runner.
    user_content = types.Content(
        role='user',
        parts=[types.Part(text=f"Please analyze this business topic: {business_topic}")]
    )

    # Drive the sequential pipeline; the stream may be an async generator
    # or a plain iterable depending on the runner implementation.
    response_text = ""
    stream = runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=user_content
    )
    if inspect.isasyncgen(stream):
        async for event in stream:
            # Keep the text of the last final response seen.
            if event.is_final_response() and event.content and event.content.parts:
                response_text = event.content.parts[0].text
    else:
        for event in stream:
            if getattr(event, "is_final_response", lambda: False)():
                if event.content and event.content.parts:
                    response_text = event.content.parts[0].text

    return response_text
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_1_sequential_agent/app.py | ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_1_sequential_agent/app.py | import streamlit as st
import asyncio
from agent import business_intelligence_team, analyze_business_intelligence
# Page configuration
st.set_page_config(
page_title="Sequential Agent Demo",
page_icon=":arrow_right:",
layout="wide"
)
# Title and description
st.title("🚀 Business Implementation Plan Generator Agent")
st.markdown("""
This **Business Implementation Plan Generator Agent** analyzes business opportunities through a comprehensive 4-step process:
1. **🔍 Market Analysis** - Researches market, competitors, and trends using web search
2. **📊 SWOT Analysis** - Identifies strengths, weaknesses, opportunities, and threats
3. **🎯 Strategy Development** - Creates strategic objectives and action plans
4. **📋 Implementation Planning** - Generates detailed business implementation roadmap
**Result**: A complete business implementation plan ready for execution.
""")
# This is a placeholder user_id for demo purposes.
# In a real app, you might use authentication or session state to set this.
user_id = "demo_user"
# Sample business topics
sample_topics = [
"Electric vehicle charging stations in urban areas",
"AI-powered healthcare diagnostics",
"Sustainable food delivery services",
"Remote work collaboration tools",
"Renewable energy storage solutions"
]
# Main content
st.header("Generate Your Business Implementation Plan")
# Topic input
business_topic = st.text_area(
"Enter a business opportunity to analyze:",
value=sample_topics[0],
height=100,
placeholder="Describe a business opportunity, industry, or market you'd like to analyze for implementation planning..."
)
# Sample topics
st.subheader("Or choose from sample business opportunities:")
cols = st.columns(len(sample_topics))
for i, topic in enumerate(sample_topics):
if cols[i].button(topic, key=f"topic_{i}"):
business_topic = topic
st.rerun()
# Analysis button
if st.button("🚀 Generate Business Implementation Plan", type="primary"):
if business_topic.strip():
st.info("🚀 Starting business analysis... This will research the market, perform SWOT analysis, develop strategy, and create an implementation plan.")
# Display the workflow
st.subheader("Business Analysis Workflow")
col1, col2, col3, col4 = st.columns(4)
with col1:
st.markdown("**1. Market Analysis**")
st.markdown("🔍 Web search + competitive research")
with col2:
st.markdown("**2. SWOT Analysis**")
st.markdown("📊 Strengths, Weaknesses, Opportunities, Threats")
with col3:
st.markdown("**3. Strategy Development**")
st.markdown("🎯 Strategic objectives and action plans")
with col4:
st.markdown("**4. Implementation Planning**")
st.markdown("📋 Detailed roadmap and execution plan")
# Run the actual analysis
with st.spinner("Generating comprehensive business implementation plan..."):
try:
result = asyncio.run(analyze_business_intelligence(user_id, business_topic))
st.success("✅ Business Implementation Plan Generated!")
st.subheader("Your Business Implementation Plan")
st.markdown(result)
except Exception as e:
st.error(f"❌ Error during analysis: {str(e)}")
st.info("Make sure you have set up your GOOGLE_API_KEY in the .env file")
else:
st.error("Please enter a business opportunity to analyze.")
# How it works (in sidebar)
with st.sidebar:
st.header("How It Works")
st.markdown("""
The **Business Implementation Plan Generator Agent** uses a sophisticated sequential workflow to create comprehensive business plans:
1. **🔍 Market Analysis Agent**: Uses web search to research current market conditions, competitors, and trends
2. **📊 SWOT Analysis Agent**: Analyzes the market research to identify strategic insights
3. **🎯 Strategy Development Agent**: Creates strategic objectives and action plans based on SWOT analysis
4. **📋 Implementation Planning Agent**: Develops detailed execution roadmaps and resource requirements
**Key Innovation**: The Market Analysis Agent has access to a specialized Search Agent (wrapped as AgentTool) that can perform real-time web searches for current market intelligence.
Each agent builds upon the previous agent's output, creating a comprehensive business implementation plan ready for execution.
""")
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_3_parallel_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_3_parallel_agent/agent.py | from typing import Dict, Any
import inspect
from dotenv import load_dotenv
from google.adk.agents import LlmAgent, ParallelAgent
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
from google.genai import types
load_dotenv()
# Child agents write to distinct keys in session.state for UI consumption
market_trends_agent = LlmAgent(
name="market_trends_agent",
model="gemini-3-flash-preview",
description="Summarizes recent market trends for the topic",
instruction=(
"Summarize 3-5 recent market trends for the topic in session.state['topic'].\n"
"Output a concise markdown list."
),
)
competitor_intel_agent = LlmAgent(
name="competitor_intel_agent",
model="gemini-3-flash-preview",
description="Identifies key competitors and positioning",
instruction=(
"List 3-5 notable competitors for session.state['topic'] and describe their positioning briefly."
),
)
funding_news_agent = LlmAgent(
name="funding_news_agent",
model="gemini-3-flash-preview",
description="Reports funding/partnership news",
instruction=(
"Provide a short digest (bulleted) of recent funding or partnership news related to session.state['topic']."
),
)
# Parallel orchestrator
market_snapshot_team = ParallelAgent(
name="market_snapshot_team",
description="Runs multiple research agents concurrently to produce a market snapshot",
sub_agents=[
market_trends_agent,
competitor_intel_agent,
funding_news_agent,
],
)
# Runner and session service
session_service = InMemorySessionService()
runner = Runner(agent=market_snapshot_team, app_name="parallel_snapshot_app", session_service=session_service)
async def gather_market_snapshot(user_id: str, topic: str) -> Dict[str, Any]:
    """Execute the parallel agents and return combined snapshot text blocks.

    Returns keys: 'market_trends', 'competitors', 'funding_news'.
    """
    session_id = f"parallel_snapshot_{user_id}"

    async def _resolve(value):
        # Session-service calls may be sync or async depending on ADK version.
        if inspect.isawaitable(value):
            return await value
        return value

    session = await _resolve(
        session_service.get_session(
            app_name="parallel_snapshot_app", user_id=user_id, session_id=session_id
        )
    )
    if session:
        # Refresh the topic on an existing session so the agents see the
        # new request rather than the original one.
        if hasattr(session, "state") and isinstance(session.state, dict):
            session.state["topic"] = topic
    else:
        session = await _resolve(
            session_service.create_session(
                app_name="parallel_snapshot_app",
                user_id=user_id,
                session_id=session_id,
                state={"topic": topic},
            )
        )

    user_content = types.Content(
        role="user",
        parts=[types.Part(text=f"Topic: {topic}. Provide a concise snapshot per agent focus.")],
    )

    # Remember the most recent text emitted by each authored agent.
    latest_output: Dict[str, str] = {}

    def _record(event):
        if getattr(event, "content", None) and getattr(event.content, "parts", None):
            for part in event.content.parts:
                if hasattr(part, "text") and part.text:
                    author = getattr(event, "author", "")
                    if author:
                        latest_output[author] = part.text

    # The runner stream may be an async generator or a plain iterable.
    stream = runner.run_async(user_id=user_id, session_id=session_id, new_message=user_content)
    if inspect.isasyncgen(stream):
        async for event in stream:
            _record(event)
    else:
        for event in stream:
            _record(event)

    return {
        "market_trends": latest_output.get(market_trends_agent.name, ""),
        "competitors": latest_output.get(competitor_intel_agent.name, ""),
        "funding_news": latest_output.get(funding_news_agent.name, ""),
    }
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_3_parallel_agent/app.py | ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_3_parallel_agent/app.py | import streamlit as st
import asyncio
from agent import market_snapshot_team, gather_market_snapshot
st.set_page_config(page_title="Parallel Agent Demo", page_icon=":fast_forward:", layout="wide")
st.title("⚡ Market Snapshot with Gemini 3 Flash(Parallel Agents)")
st.markdown(
"""
This demo runs multiple research agents in parallel using a ParallelAgent:
- Market trends analysis
- Competitor intelligence
- Funding and partnerships news
Each sub-agent writes its results into a shared session.state under distinct keys. A subsequent step (or this UI) can read the combined snapshot.
"""
)
user_id = "demo_parallel_user"
st.header("Run a market snapshot")
topic = st.text_input(
"Research topic",
value="AI-powered customer support platforms",
placeholder="What market/topic do you want a quick parallel snapshot on?",
)
if st.button("Run Parallel Research", type="primary"):
if topic.strip():
st.info("Running parallel agents… market trends, competitors, and funding news")
with st.spinner("Gathering snapshot…"):
try:
results = asyncio.run(gather_market_snapshot(user_id, topic))
st.success("Snapshot ready")
col1, col2, col3 = st.columns(3)
with col1:
st.subheader("Market Trends")
st.write(results.get("market_trends", ""))
with col2:
st.subheader("Competitors")
st.write(results.get("competitors", ""))
with col3:
st.subheader("Funding News")
st.write(results.get("funding_news", ""))
except Exception as e:
st.error(f"Error: {e}")
else:
st.error("Please enter a topic")
with st.sidebar:
st.header("How it works")
st.markdown(
"""
- Uses `ParallelAgent` to execute sub-agents concurrently
- Each child runs on its own invocation branch, but shares the same session.state
- Distinct `output_key`s prevent overwrites in the shared state
- This pattern is ideal for fan-out data gathering before synthesis
"""
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_2_loop_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_2_loop_agent/agent.py | import os
import asyncio
import inspect
from typing import AsyncGenerator, Dict, Any
from dotenv import load_dotenv
from google.adk.agents import LlmAgent, LoopAgent
from google.adk.agents.base_agent import BaseAgent
from google.adk.agents.invocation_context import InvocationContext
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
from google.adk.events import Event, EventActions
from google.genai import types
# Load environment variables
load_dotenv()
# ------------------------------------------------------------
# Sub-agent 1: LLM refiner that improves the plan each iteration
# ------------------------------------------------------------
plan_refiner = LlmAgent(
    name="plan_refiner",
    model="gemini-3-flash-preview",
    description="Iteratively refines a brief product/launch plan given topic and prior context",
    # The instruction frames the model as one step of a loop: each turn must
    # improve (not restate) the previous plan, since it will run again.
    instruction=(
        "You are an iterative planner. On each turn:\n"
        "- Improve and tighten the current plan for the topic in session state\n"
        "- Keep it concise (5-8 bullets) and avoid repeating prior text verbatim\n"
        "- Incorporate clarity, feasibility, and crisp sequencing\n"
        "- Assume this output will be refined again in subsequent iterations\n\n"
        "Output format:\n"
        "Title line\n"
        "- Bullet 1\n- Bullet 2\n- Bullet 3 ..."
    ),
)
# ------------------------------------------------------------
# Sub-agent 2: Progress tracker increments iteration counter
# ------------------------------------------------------------
class IncrementIteration(BaseAgent):
    """Non-LLM loop step: bumps the shared `iteration` counter in session state."""

    async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
        # Advance the counter (absent state defaults to 0) and write it back so
        # later steps in the same loop pass observe the new value.
        next_iteration = 1 + int(ctx.session.state.get("iteration", 0))
        ctx.session.state["iteration"] = next_iteration
        note = types.Part(text=f"Iteration advanced to {next_iteration}")
        yield Event(
            author=self.name,
            content=types.Content(role="model", parts=[note]),
        )
# ------------------------------------------------------------
# Sub-agent 3: Completion check with optional early stop
# - Stops if iteration >= target_iterations OR session flag 'accepted' is True
# ------------------------------------------------------------
class CheckCompletion(BaseAgent):
    """Non-LLM loop step that decides whether the LoopAgent should stop.

    Stops when the shared state's `accepted` flag is True, or when the
    `iteration` counter has reached `target_iterations` (default 3).
    """

    async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
        state = ctx.session.state
        limit = int(state.get("target_iterations", 3))
        count = int(state.get("iteration", 0))
        stop = bool(state.get("accepted", False)) or count >= limit
        message = "Stopping criteria met" if stop else "Continuing loop"
        yield Event(
            author=self.name,
            # escalate=True signals the enclosing LoopAgent to terminate.
            actions=EventActions(escalate=stop),
            content=types.Content(role="model", parts=[types.Part(text=message)]),
        )
# Instantiate the custom (non-LLM) loop steps defined above.
increment_iteration = IncrementIteration(name="increment_iteration")
check_completion = CheckCompletion(name="check_completion")
# ------------------------------------------------------------
# LoopAgent: Executes sub-agents sequentially in a loop
# - Termination: max_iterations, or CheckCompletion escalates
# - Context & State: Same InvocationContext across iterations
# ------------------------------------------------------------
spec_refinement_loop = LoopAgent(
    name="spec_refinement_loop",
    description=(
        "Iteratively refines a plan using LLM, tracks iterations, and stops when target iterations "
        "are reached or an 'accepted' flag is set in session state."
    ),
    # Hard safety cap; the expected exit is check_completion escalating earlier.
    max_iterations=10,
    sub_agents=[
        plan_refiner,
        increment_iteration,
        check_completion,
    ],
)
# ------------------------------------------------------------
# Runner setup
# ------------------------------------------------------------
# In-memory session store: state lives only for this process's lifetime.
session_service = InMemorySessionService()
runner = Runner(
    agent=spec_refinement_loop,
    app_name="loop_refinement_app",
    session_service=session_service,
)
# ------------------------------------------------------------
# Public API: run the loop refinement for a topic
# ------------------------------------------------------------
async def iterate_spec_until_acceptance(
    user_id: str, topic: str, target_iterations: int = 3
) -> Dict[str, Any]:
    """Run the LoopAgent to iteratively refine a plan.

    Args:
        user_id: Caller identity; also used to derive a stable session id.
        topic: Subject the plan should cover.
        target_iterations: Soft stop checked by CheckCompletion; the loop may
            also stop early if the session's 'accepted' flag is set.

    Returns:
        Dict with 'final_plan' (last refined text), 'iterations' (counter read
        back from session state) and 'stopped_reason'.
    """
    session_id = f"loop_refinement_{user_id}"

    async def _maybe_await(value):
        # Some session-service implementations are sync, some async.
        return await value if inspect.isawaitable(value) else value

    # Create or get session (support both sync/async services)
    session = await _maybe_await(session_service.get_session(
        app_name="loop_refinement_app",
        user_id=user_id,
        session_id=session_id,
    ))
    if not session:
        session = await _maybe_await(session_service.create_session(
            app_name="loop_refinement_app",
            user_id=user_id,
            session_id=session_id,
            state={
                "topic": topic,
                "iteration": 0,
                "target_iterations": int(target_iterations),
                # Optionally, an external process or UI could set this to True to stop early
                "accepted": False,
            },
        ))
    else:
        # Refresh topic/target if user re-runs on UI
        if hasattr(session, "state") and isinstance(session.state, dict):
            session.state["topic"] = topic
            session.state["target_iterations"] = int(target_iterations)

    # Seed message for LLM
    user_content = types.Content(
        role="user",
        parts=[
            types.Part(
                text=(
                    "Topic: "
                    + topic
                    + "\nPlease produce or refine a concise plan."
                )
            )
        ],
    )

    final_text = ""
    last_plan_text = ""

    def _harvest(event) -> None:
        # Shared per-event extraction: prefer the latest plan_refiner text and
        # remember the final-response text as a fallback.
        nonlocal final_text, last_plan_text
        if event.content and getattr(event.content, "parts", None):
            for part in event.content.parts:
                if hasattr(part, "text") and part.text:
                    if getattr(event, "author", "") == plan_refiner.name:
                        last_plan_text = part.text
                    if event.is_final_response():
                        final_text = part.text

    stream = runner.run_async(user_id=user_id, session_id=session_id, new_message=user_content)
    # Support both async generators and plain iterables.
    # Fixed: a previous version re-processed the stale `event` variable after
    # the loops (duplicate work; NameError on an empty stream), and the sync
    # branch overwrote final_text on every text part instead of only on the
    # final response.
    if inspect.isasyncgen(stream):
        async for event in stream:
            _harvest(event)
    else:
        for event in stream:
            _harvest(event)

    current_iteration = int(session.state.get("iteration", 0))
    reached = current_iteration >= int(session.state.get("target_iterations", 0))
    accepted = bool(session.state.get("accepted", False))
    return {
        "final_plan": last_plan_text or final_text,
        "iterations": current_iteration,
        "stopped_reason": "accepted" if accepted else ("target_iterations" if reached else "max_iterations_or_other"),
    }
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_2_loop_agent/app.py | ai_agent_framework_crash_course/google_adk_crash_course/9_multi_agent_patterns/9_2_loop_agent/app.py | import streamlit as st
import asyncio
from agent import iterate_spec_until_acceptance
st.set_page_config(page_title="Loop Agent Demo", page_icon=":repeat:", layout="wide")
st.title("🔁 Iterative Plan Refiner with Gemini 3 Flash(Loop Agent)")
# Intro copy describing the LoopAgent pattern demonstrated here.
st.markdown(
    """
This demo runs a LoopAgent that repeatedly executes sub-agents to iteratively refine a plan.
Loop characteristics:
- Executes its sub-agents sequentially in a loop
- Terminates when the session's `accepted` flag is set or after the target iterations
- Shares the same session state across iterations, so counters/flags persist
"""
)
# Fixed demo identity; the agent module derives the session id from it.
user_id = "demo_loop_user"
st.header("Run an iterative refinement")
topic = st.text_area(
    "Topic",
    value="AI-powered customer support platform launch plan",
    height=100,
    placeholder="What plan/topic should be refined iteratively?",
)
col_a, col_b = st.columns([1, 1])
with col_a:
    target_iterations = st.number_input(
        "Target iterations (early stop possible)", min_value=1, max_value=20, value=3, step=1
    )
with col_b:
    st.caption(
        "Set a reasonable number of iterations. The loop may stop earlier if the session state flag `accepted` becomes True."
    )
if st.button("Run Loop Refinement", type="primary"):
    if topic.strip():
        st.info("Refining plan in a loop…")
        with st.spinner("Working…"):
            try:
                # Bridge Streamlit's sync script into the async agent API.
                results = asyncio.run(
                    iterate_spec_until_acceptance(user_id, topic, int(target_iterations))
                )
                st.success("Loop finished")
                st.subheader("Final Refined Plan")
                st.write(results.get("final_plan", ""))
                st.subheader("Run Metadata")
                st.write({
                    "iterations": results.get("iterations"),
                    "stopped_reason": results.get("stopped_reason"),
                })
            except Exception as e:
                # Show failures inline instead of crashing the app.
                st.error(f"Error: {e}")
    else:
        st.error("Please enter a topic")
# Sidebar: static explanation of the loop wiring.
with st.sidebar:
    st.header("How it works")
    st.markdown(
        """
- Uses `LoopAgent` with 3 sub-agents:
1) `plan_refiner` (LLM) refines the plan
2) `increment_iteration` updates the iteration counter in session state
3) `check_completion` escalates when done (accepted flag or target reached)
- The same `InvocationContext` and session state are reused every iteration
- The loop stops if `accepted` is True or the `target_iterations` is reached.
"""
    )
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/adk_yaml_examples/multi_agent_web_research_team/multi_agent_web_researcher/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/adk_yaml_examples/multi_agent_web_research_team/multi_agent_web_researcher/__init__.py | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false | |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_2_llm_interaction_callbacks/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_2_llm_interaction_callbacks/agent.py | #!/usr/bin/env python3
"""
LLM Interaction Callbacks Demo
Simple agent that demonstrates LLM request/response monitoring
"""
import os
from datetime import datetime
from typing import Optional
from google.adk.agents import LlmAgent
from google.adk.agents.callback_context import CallbackContext
from google.adk.runners import InMemoryRunner
from google.genai import types
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
def before_model_callback(callback_context: CallbackContext, llm_request) -> Optional[types.Content]:
    """Log an outgoing LLM request and stash timing info for the after-callback."""
    agent_name = callback_context.agent_name
    started_at = datetime.now()

    model = getattr(llm_request, 'model', 'unknown')
    # First non-empty text part anywhere in the request contents, else "unknown".
    prompt_text = next(
        (
            part.text
            for content in getattr(llm_request, 'contents', None) or []
            for part in getattr(content, 'parts', None) or []
            if getattr(part, 'text', None)
        ),
        "unknown",
    )

    print(f"🤖 LLM Request to {model}")
    print(f"⏰ Request time: {started_at.strftime('%H:%M:%S')}")
    print(f"📋 Agent: {agent_name}")
    print()  # Add spacing

    # Persist request metadata in session state so after_model_callback can
    # compute the round-trip duration and report the model name.
    snapshot = callback_context.state.to_dict()
    snapshot["llm_request_time"] = started_at.isoformat()
    snapshot["llm_model"] = model
    snapshot["llm_prompt_length"] = len(prompt_text)
    callback_context.state.update(snapshot)

    # None -> proceed with the normal LLM call.
    return None
def after_model_callback(callback_context: CallbackContext, llm_response) -> Optional[types.Content]:
    """Callback after LLM response is received.

    Logs duration (using the request timestamp stored by before_model_callback),
    token usage, and a rough cost estimate. Returns None so the original
    response is used unchanged.

    Fixed: removed dead locals (`response_text` built from str(llm_response)
    and an unused `agent_name`) that were never read.
    """
    current_state = callback_context.state.to_dict()
    model = current_state.get("llm_model", "unknown")

    # Extract token count from usage_metadata
    tokens = 0
    if llm_response and hasattr(llm_response, 'usage_metadata') and llm_response.usage_metadata:
        tokens = getattr(llm_response.usage_metadata, 'total_token_count', 0)

    # Duration relative to the timestamp saved by before_model_callback.
    request_time_str = current_state.get("llm_request_time")
    if request_time_str:
        request_time = datetime.fromisoformat(request_time_str)
        duration_seconds = (datetime.now() - request_time).total_seconds()
    else:
        duration_seconds = 0

    print(f"📝 LLM Response from {model}")
    print(f"⏱️ Duration: {duration_seconds:.2f}s")
    print(f"🔢 Tokens: {tokens}")

    # Calculate estimated cost for Gemini 3 Flash
    # Pricing: $2.50 per 1M output tokens (including thinking tokens)
    cost_per_1k_output = 0.0025  # $2.50 per 1M = $0.0025 per 1K
    estimated_cost = (tokens / 1000) * cost_per_1k_output
    print(f"💰 Estimated cost: ${estimated_cost:.4f}")
    print()  # Add spacing

    # Return None to use the original response
    return None
# Create agent with LLM callbacks
# The before/after model callbacks defined above log every LLM round trip.
root_agent = LlmAgent(
    name="llm_monitor_agent",
    model="gemini-3-flash-preview",
    description="Agent with LLM interaction monitoring",
    instruction="""
You are a helpful assistant with LLM monitoring.
Your role is to:
- Provide clear, informative responses
- Keep responses concise but comprehensive
- Demonstrate the LLM callback system
The system will automatically track:
- Your requests to the LLM model
- Response times and token usage
- Estimated API costs
Focus on being helpful while showing the monitoring capabilities.
""",
    before_model_callback=before_model_callback,
    after_model_callback=after_model_callback
)
# Create runner for agent execution
# InMemoryRunner bundles an in-memory session service with the agent.
runner = InMemoryRunner(agent=root_agent, app_name="llm_monitor_app")
async def run_agent(message: str) -> str:
    """Send one user message through the runner and return the final reply text."""
    user_id = "demo_user"
    session_id = "demo_session"

    # Reuse the runner's bundled in-memory session service.
    sessions = runner.session_service
    existing = await sessions.get_session(
        app_name="llm_monitor_app",
        user_id=user_id,
        session_id=session_id
    )
    if not existing:
        await sessions.create_session(
            app_name="llm_monitor_app",
            user_id=user_id,
            session_id=session_id,
            state={"conversation_history": []}
        )

    outgoing = types.Content(role='user', parts=[types.Part(text=message)])

    reply = ""
    # Drain the full event stream (no early break) so after-callbacks still fire.
    async for ev in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=outgoing
    ):
        if ev.is_final_response() and ev.content:
            reply = ev.content.parts[0].text.strip()
    return reply
if __name__ == "__main__":
    import asyncio

    # Manual smoke test: run a few prompts through the monitored agent and
    # observe the callback telemetry printed by the before/after hooks.
    print("🧪 Testing LLM Interaction Callbacks")
    print("=" * 50)
    test_messages = [
        "Explain quantum computing in simple terms",
        "Write a short poem about AI",
        "What are the benefits of renewable energy?"
    ]

    async def test_agent():
        # Sequential on purpose: keeps per-prompt console output readable.
        for message in test_messages:
            print(f"\n🤖 User: {message}")
            response = await run_agent(message)
            print(f"🤖 Agent: {response}")
            print("-" * 50)
asyncio.run(test_agent()) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_2_llm_interaction_callbacks/app.py | ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_2_llm_interaction_callbacks/app.py | #!/usr/bin/env python3
"""
Streamlit App for LLM Interaction Callbacks Demo
"""
import streamlit as st
import sys
import os
import asyncio
from agent import run_agent
# Page configuration
st.set_page_config(
    page_title="LLM Interaction Callbacks",
    page_icon="🤖",
    layout="wide"
)
# Title and description
st.title("🤖 LLM Interaction Callbacks Demo")
st.markdown("""
This demo shows how to monitor LLM requests and responses using callbacks.
Watch the console output to see detailed LLM interaction tracking!
""")
# Sidebar with information
with st.sidebar:
    st.header("📊 LLM Monitoring")
    st.markdown("""
**Request Callback**: Triggered when LLM request is sent
- Logs model name and prompt
- Records request timestamp
- Tracks prompt length
**Response Callback**: Triggered when LLM response is received
- Calculates response duration
- Tracks token usage
- Estimates API costs
""")
# Main chat interface
st.header("💬 Chat with LLM Monitor")
# Initialize chat history (persists across Streamlit reruns)
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages: replay the stored transcript on every rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Ask me something..."):
    # Add user message to chat
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # Get agent response (run_agent is async; bridge from the sync script)
    with st.chat_message("assistant"):
        with st.spinner("🤖 LLM is processing..."):
            response = asyncio.run(run_agent(prompt))
            st.markdown(response)
    # Add assistant response to chat
    st.session_state.messages.append({"role": "assistant", "content": response})
# Quick test buttons (one-shot prompts; not appended to the transcript)
st.markdown("---")
st.header("⚡ Quick Tests")
col1, col2, col3 = st.columns(3)
with col1:
    if st.button("🔬 Science Test"):
        with st.chat_message("user"):
            st.markdown("Explain quantum computing in simple terms")
        with st.chat_message("assistant"):
            with st.spinner("🤖 LLM is processing..."):
                response = asyncio.run(run_agent("Explain quantum computing in simple terms"))
                st.markdown(response)
with col2:
    if st.button("📝 Poetry Test"):
        with st.chat_message("user"):
            st.markdown("Write a short poem about AI")
        with st.chat_message("assistant"):
            with st.spinner("🤖 LLM is processing..."):
                response = asyncio.run(run_agent("Write a short poem about AI"))
                st.markdown(response)
with col3:
    if st.button("🌍 Environment Test"):
        with st.chat_message("user"):
            st.markdown("What are the benefits of renewable energy?")
        with st.chat_message("assistant"):
            with st.spinner("🤖 LLM is processing..."):
                response = asyncio.run(run_agent("What are the benefits of renewable energy?"))
                st.markdown(response)
# Clear chat button
if st.button("🗑️ Clear Chat History"):
    st.session_state.messages = []
    st.rerun()
# Information about callbacks
st.markdown("---")
st.header("📋 LLM Callback Output")
st.markdown("""
**Check your console/terminal** to see the LLM interaction output:
```
🤖 LLM Request to gemini-3-flash-preview
⏰ Request time: 10:30:15
📋 Agent: llm_monitor_agent
📝 LLM Response from gemini-3-flash-preview
⏱️ Duration: 1.45s
🔢 Tokens: 156
💰 Estimated cost: $0.0004
```
""")
# Footer
st.markdown("---")
st.markdown("*Watch the console output to see LLM interaction callbacks in action!*") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_1_agent_lifecycle_callbacks/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_1_agent_lifecycle_callbacks/agent.py | import os
import asyncio
from datetime import datetime
from typing import Optional
from google.adk.agents import LlmAgent
from google.adk.agents.callback_context import CallbackContext
from google.adk.runners import InMemoryRunner
from google.genai import types
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# --- 1. Define the Callback Functions ---
def before_agent_callback(callback_context: CallbackContext) -> Optional[types.Content]:
    """Runs just before the agent executes: log and record the start time."""
    started_at = datetime.now()
    print(f"🚀 Agent {callback_context.agent_name} started at {started_at.strftime('%H:%M:%S')}")
    print(f"⏰ Start time: {started_at.strftime('%Y-%m-%d %H:%M:%S')}")
    print()  # Add spacing

    # Save the timestamp in session state; after_agent_callback reads it back
    # to compute the run duration.
    snapshot = callback_context.state.to_dict()
    snapshot["start_time"] = started_at.isoformat()
    callback_context.state.update(snapshot)
    return None
def after_agent_callback(callback_context: CallbackContext) -> Optional[types.Content]:
    """Runs after the agent finishes: report duration using the stored start time."""
    agent_name = callback_context.agent_name
    stored = callback_context.state.to_dict().get("start_time")
    # Nothing is printed if before_agent_callback never stored a timestamp.
    if stored:
        begun = datetime.fromisoformat(stored)
        finished = datetime.now()
        elapsed = (finished - begun).total_seconds()
        print(f"✅ Agent {agent_name} completed")
        print(f"⏱️ Duration: {elapsed:.2f}s")
        print(f"⏰ End time: {finished.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"📊 Performance: {elapsed:.2f}s | {agent_name}")
        print()  # Add spacing
    return None
# --- 2. Setup Agent with Callbacks ---
# Both lifecycle hooks defined above are wired in; they print timing info.
llm_agent_with_callbacks = LlmAgent(
    name="agent_lifecycle_demo_agent",
    model="gemini-3-flash-preview",
    instruction="You are a helpful assistant. Respond to user questions clearly and concisely.",
    description="An LLM agent demonstrating lifecycle callbacks for monitoring",
    before_agent_callback=before_agent_callback,
    after_agent_callback=after_agent_callback
)
# --- 3. Setup Runner and Sessions ---
# InMemoryRunner bundles an in-memory session service with the agent.
runner = InMemoryRunner(agent=llm_agent_with_callbacks, app_name="agent_lifecycle_callback_demo")
async def run_agent(message: str) -> str:
    """Send one message through the runner and return the final response text."""
    user_id = "demo_user"
    session_id = "demo_session"

    # Reuse the runner's bundled in-memory session service.
    sessions = runner.session_service
    existing = await sessions.get_session(
        app_name="agent_lifecycle_callback_demo",
        user_id=user_id,
        session_id=session_id
    )
    if not existing:
        await sessions.create_session(
            app_name="agent_lifecycle_callback_demo",
            user_id=user_id,
            session_id=session_id,
            state={"conversation_history": []}
        )

    outgoing = types.Content(role='user', parts=[types.Part(text=message)])

    reply = ""
    # Iterate the entire event stream (no early break) so that
    # after_agent_callback is guaranteed to run.
    async for ev in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=outgoing
    ):
        if ev.is_final_response() and ev.content:
            reply = ev.content.parts[0].text.strip()
    return reply
# --- 4. Execute ---
if __name__ == "__main__":
    # Manual smoke test: drives the agent once to show callback output.
    print("\n" + "="*50 + " Agent Lifecycle Callbacks Demo " + "="*50)
    # Test messages
    test_messages = [
        "Hello, how are you?"
    ]

    async def test_agent():
        for i, message in enumerate(test_messages, 1):
            print(f"\n--- Test {i}: {message} ---")
            response = await run_agent(message)
            print(f"🤖 Response: {response}")
asyncio.run(test_agent()) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_1_agent_lifecycle_callbacks/app.py | ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_1_agent_lifecycle_callbacks/app.py | #!/usr/bin/env python3
"""
Streamlit App for Agent Lifecycle Callbacks Demo
"""
import streamlit as st
import asyncio
from agent import llm_agent_with_callbacks, runner
from google.genai import types
# Page configuration
st.set_page_config(
    page_title="Agent Lifecycle Callbacks Demo",
    page_icon="🔄",
    layout="wide"
)
# Title and description
st.title("🔄 Agent Lifecycle Callbacks Demo")
st.markdown("""
This demo shows how to use `before_agent_callback` and `after_agent_callback` to monitor agent execution.
Watch the console output to see the callback timing information.
""")
# Sidebar: static explanation of the two lifecycle hooks.
with st.sidebar:
    st.header("📊 Callback Information")
    st.markdown("""
**Before Callback:**
- Records start time
- Logs agent execution start
**After Callback:**
- Calculates execution duration
- Logs completion time
""")
    st.header("🔧 Technical Details")
    st.markdown("""
- Uses `InMemoryRunner` for session management
- Callbacks receive `CallbackContext` with agent info
- State is shared between callbacks via session
""")
# Main chat interface
st.header("💬 Chat with Agent")
# Define the get_response function
async def get_response(prompt_text: str) -> str:
    """Run one prompt through the lifecycle-callback agent and return its reply."""
    user_id = "demo_user"
    session_id = "demo_session"

    # Reuse the runner's bundled in-memory session service.
    sessions = runner.session_service
    existing = await sessions.get_session(
        app_name="agent_lifecycle_callback_demo",
        user_id=user_id,
        session_id=session_id
    )
    if not existing:
        await sessions.create_session(
            app_name="agent_lifecycle_callback_demo",
            user_id=user_id,
            session_id=session_id
        )

    outgoing = types.Content(role='user', parts=[types.Part(text=prompt_text)])

    reply = ""
    # Drain the whole event stream (no break) so after_agent_callback still fires.
    async for ev in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=outgoing
    ):
        if ev.is_final_response() and ev.content:
            reply = ev.content.parts[0].text.strip()
    return reply
# Initialize chat history (persists across Streamlit reruns)
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages: replay the stored transcript on every rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Ask me anything..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add assistant response to chat history
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        # Show loading message
        message_placeholder.markdown("🤔 Thinking...")
        # Get response (bridge the async agent call from the sync script)
        response = asyncio.run(get_response(prompt))
        # Update placeholder with response
        message_placeholder.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
# Quick test buttons (one-shot prompts; not appended to the transcript)
st.markdown("---")
st.header("⚡ Quick Tests")
col1, col2, col3 = st.columns(3)
with col1:
    if st.button("👋 Greeting Test"):
        with st.chat_message("user"):
            st.markdown("Hello, how are you?")
        with st.chat_message("assistant"):
            with st.spinner("🤖 Agent is processing..."):
                response = asyncio.run(get_response("Hello, how are you?"))
                st.markdown(response)
with col2:
    if st.button("🧮 Math Test"):
        with st.chat_message("user"):
            st.markdown("What's 2 + 2?")
        with st.chat_message("assistant"):
            with st.spinner("🤖 Agent is processing..."):
                response = asyncio.run(get_response("What's 2 + 2?"))
                st.markdown(response)
with col3:
    if st.button("😄 Joke Test"):
        with st.chat_message("user"):
            st.markdown("Tell me a short joke")
        with st.chat_message("assistant"):
            with st.spinner("🤖 Agent is processing..."):
                response = asyncio.run(get_response("Tell me a short joke"))
                st.markdown(response)
# Clear chat button
if st.button("🗑️ Clear Chat"):
    st.session_state.messages = []
    st.rerun()
# Footer
st.markdown("---")
st.markdown("""
<div style='text-align: center; color: #666;'>
<p>Check the console/terminal for callback timing information</p>
</div>
""", unsafe_allow_html=True) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_3_tool_execution_callbacks/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_3_tool_execution_callbacks/agent.py | import os
import asyncio
import time
from datetime import datetime
from typing import Optional, Dict, Any
from google.adk.agents import LlmAgent
from google.adk.runners import InMemoryRunner
from google.adk.tools import BaseTool, FunctionTool
from google.adk.tools.tool_context import ToolContext
from google.genai import types
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
def calculator_tool(operation: str, a: float, b: float) -> str:
    """Perform one basic arithmetic operation and describe it as text.

    Supported operations: "add", "subtract", "multiply", "divide".
    Division by zero and unknown operations return an error string.
    """
    if operation == "divide" and b == 0:
        return "Error: Division by zero"
    # Dispatch table: display symbol plus a lazy computation per operation.
    operations = {
        "add": ("+", lambda: a + b),
        "subtract": ("-", lambda: a - b),
        "multiply": ("×", lambda: a * b),
        "divide": ("÷", lambda: a / b),
    }
    if operation not in operations:
        return f"Unknown operation: {operation}"
    symbol, compute = operations[operation]
    return f"{a} {symbol} {b} = {compute()}"
# Create FunctionTool from the calculator function
# Wrapping exposes calculator_tool to the LlmAgent as a callable tool.
calculator_function_tool = FunctionTool(func=calculator_tool)
def before_tool_callback(tool: BaseTool, args: Dict[str, Any], tool_context: ToolContext) -> Optional[Dict[str, Any]]:
    """Runs before a tool executes: log the call and stash a start timestamp."""
    started_at = time.time()
    print(f"🔧 Tool {tool.name} started")
    print(f"📝 Parameters: {args}")
    print(f"📋 Agent: {tool_context.agent_name}")
    print()  # Add spacing

    # Record the wall-clock start so after_tool_callback can report duration.
    snapshot = tool_context.state.to_dict()
    snapshot["tool_start_time"] = started_at
    tool_context.state.update(snapshot)

    # None -> allow normal tool execution.
    return None
def after_tool_callback(tool: BaseTool, args: Dict[str, Any], tool_context: ToolContext, tool_response: Any) -> Optional[Any]:
    """Runs after a tool finishes: report duration (when known) and the result."""
    began = tool_context.state.to_dict().get("tool_start_time")
    print(f"✅ Tool {tool.name} completed")
    # Duration is only printable if before_tool_callback stored a start time.
    if began:
        print(f"⏱️ Duration: {time.time() - began:.4f}s")
    print(f"📄 Result: {tool_response}")
    print()  # Add spacing

    # None -> keep the tool's original response.
    return None
# --- 2. Setup Agent with Tool Callbacks ---
# The before/after tool hooks defined above log each calculator invocation.
llm_agent_with_tool_callbacks = LlmAgent(
    name="tool_execution_demo_agent",
    model="gemini-3-flash-preview",
    instruction="You are a helpful assistant with calculator tools. When users ask for calculations, use the calculator_tool with appropriate parameters and provide clear explanations of the results.",
    description="An LLM agent demonstrating tool execution callbacks for monitoring",
    tools=[calculator_function_tool],
    before_tool_callback=before_tool_callback,
    after_tool_callback=after_tool_callback
)
# --- 3. Setup Runner and Sessions ---
# InMemoryRunner bundles an in-memory session service with the agent.
runner = InMemoryRunner(agent=llm_agent_with_tool_callbacks, app_name="tool_execution_callback_demo")
async def run_agent(message: str) -> str:
    """Send one message through the runner and return the final reply text."""
    user_id = "demo_user"
    session_id = "demo_session"

    # Reuse the runner's bundled in-memory session service.
    sessions = runner.session_service
    existing = await sessions.get_session(
        app_name="tool_execution_callback_demo",
        user_id=user_id,
        session_id=session_id
    )
    if not existing:
        await sessions.create_session(
            app_name="tool_execution_callback_demo",
            user_id=user_id,
            session_id=session_id,
            state={"conversation_history": []}
        )

    outgoing = types.Content(role='user', parts=[types.Part(text=message)])

    reply = ""
    # Drain the full event stream (no early break) so tool callbacks still fire.
    async for ev in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=outgoing
    ):
        if ev.is_final_response() and ev.content:
            reply = ev.content.parts[0].text.strip()
    return reply
# --- 4. Execute ---
if __name__ == "__main__":
print("\n" + "="*50 + " Tool Execution Callbacks Demo " + "="*50)
# Test messages
test_messages = [
"Calculate 15 + 27",
"What is 100 divided by 4?",
"Multiply 8 by 12",
"What is 50 minus 23?"
]
async def test_agent():
for i, message in enumerate(test_messages, 1):
print(f"\n--- Test {i}: {message} ---")
response = await run_agent(message)
print(f"🤖 Response: {response}")
asyncio.run(test_agent()) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_3_tool_execution_callbacks/app.py | ai_agent_framework_crash_course/google_adk_crash_course/6_callbacks/6_3_tool_execution_callbacks/app.py | #!/usr/bin/env python3
"""
Streamlit App for Tool Execution Callbacks Demo
"""
import streamlit as st
import sys
import os
import asyncio
from agent import run_agent
# Page configuration
st.set_page_config(
page_title="Tool Execution Callbacks",
page_icon="🔧",
layout="wide"
)
# Title and description
st.title("🔧 Tool Execution Callbacks Demo")
st.markdown("""
This demo shows how to monitor tool execution using callbacks.
Watch the console output to see detailed tool execution tracking!
""")
# Sidebar with information
with st.sidebar:
st.header("📊 Tool Execution Monitoring")
st.markdown("""
**Before Tool Callback**
- Triggered when a tool starts execution
- Logs tool name and input parameters
- Records agent name
- Stores start time for duration tracking
**After Tool Callback**
- Triggered when a tool finishes execution
- Logs tool result
- Calculates and displays execution duration
- Handles errors (e.g., division by zero)
""")
st.markdown("---")
st.markdown("### 🧮 Available Tools")
st.markdown("""
**Calculator Tool**:
- Addition: `add`
- Subtraction: `subtract`
- Multiplication: `multiply`
- Division: `divide`
""")
# Main chat interface
st.header("💬 Chat with Tool Monitor")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Ask me to calculate something..."):
# Add user message to chat
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
# Get agent response
with st.chat_message("assistant"):
with st.spinner("🔧 Tool is executing..."):
response = asyncio.run(run_agent(prompt))
st.markdown(response)
# Add assistant response to chat
st.session_state.messages.append({"role": "assistant", "content": response})
# Quick test buttons
st.markdown("---")
st.header("⚡ Quick Tests")

def _quick_test(test_message: str) -> None:
    """Append a canned message to the chat, run the agent on it, and render both turns.

    Deduplicates the identical handler previously copy-pasted for each
    quick-test button.
    """
    st.session_state.messages.append({"role": "user", "content": test_message})
    with st.chat_message("user"):
        st.markdown(test_message)
    with st.chat_message("assistant"):
        with st.spinner("🔧 Tool is executing..."):
            response = asyncio.run(run_agent(test_message))
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})

col1, col2, col3 = st.columns(3)
with col1:
    if st.button("➕ Addition Test"):
        _quick_test("Calculate 15 + 27")
with col2:
    if st.button("➗ Division Test"):
        _quick_test("What is 100 divided by 4?")
with col3:
    if st.button("❌ Error Test"):
        _quick_test("Calculate 10 divided by 0")
# Clear chat button
if st.button("🗑️ Clear Chat History"):
st.session_state.messages = []
st.rerun()
# Information about callbacks
st.markdown("---")
st.header("📋 Tool Callback Output")
st.markdown("""
**Check your console/terminal** to see the tool execution output:
```
🔧 Tool calculator_tool started
📝 Parameters: {'operation': 'add', 'a': 15.0, 'b': 27.0}
📋 Agent: tool_execution_demo_agent
✅ Tool calculator_tool completed
⏱️ Duration: 0.0012s
📄 Result: 15 + 27 = 42
```
""")
# Footer
st.markdown("---")
st.markdown("*Watch the console output to see tool execution callbacks in action!*") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/7_plugins/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/7_plugins/agent.py | import asyncio
from datetime import datetime
from typing import Optional, Dict, Any
from google.adk.agents import LlmAgent
from google.adk.agents.base_agent import BaseAgent
from google.adk.agents.callback_context import CallbackContext
from google.adk.plugins.base_plugin import BasePlugin
from google.adk.runners import InMemoryRunner
from google.adk.tools.base_tool import BaseTool
from google.adk.tools.tool_context import ToolContext
from google.genai import types
from dotenv import load_dotenv
# Load environment variables (API key)
load_dotenv()
# ============================================================================
# PLUGIN DEFINITION
# ============================================================================
# Plugins extend BasePlugin and provide global callbacks across all agents/tools
class SimplePlugin(BasePlugin):
    """Minimal ADK plugin: logs lifecycle events and counts agent/tool runs globally."""

    def __init__(self) -> None:
        super().__init__(name="simple_plugin")
        # Usage counters accumulated across every execution handled by the runner.
        self.agent_count = 0
        self.tool_count = 0

    async def on_user_message_callback(self, *, invocation_context, user_message: types.Content) -> Optional[types.Content]:
        """Prefix every text part of the incoming message with a timestamp."""
        stamp = datetime.now().strftime("%H:%M:%S")
        print(f"🔍 [Plugin] User message at {stamp}")
        stamped_parts = []
        for part in user_message.parts:
            if hasattr(part, 'text'):
                stamped_parts.append(types.Part(text=f"[{stamp}] {part.text}"))
        return types.Content(role='user', parts=stamped_parts)

    async def before_agent_callback(self, *, agent: BaseAgent, callback_context: CallbackContext) -> None:
        """Log and count each agent execution before it starts."""
        self.agent_count += 1
        print(f"🤖 [Plugin] Agent {agent.name} starting (count: {self.agent_count})")

    async def before_tool_callback(self, *, tool: BaseTool, tool_args: Dict[str, Any], tool_context: ToolContext) -> None:
        """Log and count each tool invocation before it runs."""
        self.tool_count += 1
        print(f"🔧 [Plugin] Tool {tool.name} starting (count: {self.tool_count})")

    async def after_run_callback(self, *, invocation_context) -> None:
        """Emit a usage summary once the entire run completes."""
        print(f"📊 [Plugin] Final Report: {self.agent_count} agents, {self.tool_count} tools")
# ============================================================================
# TOOL DEFINITION
# ============================================================================
# This tool can fail (division by zero) to demonstrate error handling
async def calculator_tool(tool_context: ToolContext, operation: str, a: float, b: float) -> Dict[str, Any]:
    """Perform a basic arithmetic *operation* on *a* and *b*.

    Raises ValueError for division by zero or an unknown operation so the
    plugin's error-handling path can be demonstrated.
    """
    print(f"🔧 [Tool] Calculator: {operation}({a}, {b})")
    if operation == "divide" and b == 0:
        raise ValueError("Division by zero is not allowed")
    # Explicit dispatch per supported operation.
    if operation == "add":
        outcome = a + b
    elif operation == "subtract":
        outcome = a - b
    elif operation == "multiply":
        outcome = a * b
    elif operation == "divide":
        outcome = a / b
    else:
        raise ValueError(f"Unknown operation: {operation}")
    return {"operation": operation, "a": a, "b": b, "result": outcome}
# ============================================================================
# AGENT AND RUNNER SETUP
# ============================================================================
# Create agent with the calculator tool
agent = LlmAgent(name="plugin_demo_agent", model="gemini-3-flash-preview",
instruction="You are a helpful assistant that can perform calculations. Use the calculator_tool when needed.",
tools=[calculator_tool])
# Create runner and register the plugin - this makes the plugin global
runner = InMemoryRunner(agent=agent, app_name="plugin_demo_app", plugins=[SimplePlugin()])
# ============================================================================
# AGENT EXECUTION FUNCTION
# ============================================================================
async def run_agent(message: str) -> str:
    """Run the plugin-instrumented agent on *message* and return its text output.

    Plugin callbacks fire automatically while the runner processes the request.
    """
    uid, sid = "demo_user", "demo_session"
    svc = runner.session_service

    # Sessions are required by ADK; create one lazily for the fixed demo user.
    if not await svc.get_session(app_name="plugin_demo_app", user_id=uid, session_id=sid):
        await svc.create_session(app_name="plugin_demo_app", user_id=uid, session_id=sid)

    content = types.Content(role='user', parts=[types.Part(text=message)])

    # Accumulate every text fragment the agent emits, then join once.
    chunks = []
    async for event in runner.run_async(user_id=uid, session_id=sid, new_message=content):
        if event.content and event.content.parts:
            chunks.extend(p.text for p in event.content.parts if hasattr(p, 'text') and p.text)
    combined = "".join(chunks)
    return combined if combined else "No response received from agent."
# ============================================================================
# MAIN EXECUTION
# ============================================================================
if __name__ == "__main__":
# Test the plugin functionality
asyncio.run(run_agent("what is 2 + 2?"))
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/7_plugins/app.py | ai_agent_framework_crash_course/google_adk_crash_course/7_plugins/app.py | import streamlit as st
import asyncio
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent))
from agent import run_agent
st.set_page_config(page_title="Google ADK Plugins Tutorial", page_icon="🔌")
st.title("🔌 Google ADK Plugins Tutorial")
st.markdown("Demonstrates plugins for cross-cutting concerns like logging and monitoring.")
test_scenarios = {
"Normal Conversation": "Hello! How are you?",
"Simple Calculation": "Calculate 15 + 27",
"Error Handling": "What is 10 divided by 0?"
}
selected_scenario = st.selectbox("Choose a test scenario:", list(test_scenarios.keys()))
if st.button("🚀 Run Test"):
with st.spinner("Running..."):
try:
response = asyncio.run(run_agent(test_scenarios[selected_scenario]))
st.success("**Agent Response:**")
st.write(response)
except Exception as e:
st.error(f"Error: {str(e)}")
st.markdown("---")
custom_message = st.text_area("Or enter your own message:", placeholder="Type here...")
if st.button("🚀 Run Custom Message"):
if custom_message.strip():
with st.spinner("Processing..."):
try:
response = asyncio.run(run_agent(custom_message))
st.success("**Agent Response:**")
st.write(response)
except Exception as e:
st.error(f"Error: {str(e)}")
else:
st.warning("Please enter a message.")
with st.expander("📚 About Plugins"):
st.markdown("""
**Plugins** are custom code modules that execute at various stages of agent workflow lifecycle.
**Key Features:**
- 🔍 Request logging and modification
- 🤖 Agent execution tracking
- 🔧 Tool usage monitoring
- 📊 Final reporting and analytics
**Plugin Callbacks:**
- `on_user_message_callback()` - Modify user input
- `before_agent_callback()` - Track agent starts
- `before_tool_callback()` - Track tool usage
- `after_run_callback()` - Generate reports
""")
st.markdown("---")
st.markdown("*Part of the Google ADK Crash Course*")
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/1_starter_agent/creative_writing_agent/__init__.py | ai_agent_framework_crash_course/google_adk_crash_course/1_starter_agent/creative_writing_agent/__init__.py | from .agent import root_agent | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/1_starter_agent/creative_writing_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/1_starter_agent/creative_writing_agent/agent.py | from google.adk.agents import LlmAgent
# Create a creative writing agent
root_agent = LlmAgent(
name="creative_writing_agent",
model="gemini-3-flash-preview",
description="A creative writing assistant that helps with stories, poems, and creative content",
instruction="""
You are a creative writing assistant.
Your role is to:
- Help users develop story ideas
- Assist with character development
- Provide writing prompts and inspiration
- Help with plot structure and pacing
- Offer feedback on creative writing
When users want to write creatively:
- Ask engaging questions to develop ideas
- Suggest creative elements and themes
- Help structure stories and narratives
- Provide constructive feedback
Keep responses creative, inspiring, and supportive of artistic expression.
"""
) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/5_memory_agent/5_1_in_memory_conversation_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/5_memory_agent/5_1_in_memory_conversation_agent/agent.py | import asyncio
import os
import uuid
from google.adk.agents import LlmAgent
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
from google.genai import types
from dotenv import load_dotenv
# Load environment variables (for API key)
load_dotenv()
# Create session service and agent
session_service = InMemorySessionService()
agent = LlmAgent(
name="memory_agent",
model="gemini-3-flash-preview",
description="A simple agent that remembers conversations",
instruction="You are a helpful assistant. Remember what users tell you and reference it in future conversations."
)
# Create runner with session service
runner = Runner(
agent=agent,
app_name="demo",
session_service=session_service
)
async def chat(user_id: str, message: str) -> str:
    """Exchange one message with the in-memory agent on behalf of *user_id*.

    A per-user session carries the conversation history so the agent can
    reference earlier turns; state lives only for the process lifetime.
    """
    sid = f"session_{user_id}"

    # Ensure the user's session exists, seeded with an empty history.
    if not await session_service.get_session(app_name="demo", user_id=user_id, session_id=sid):
        await session_service.create_session(
            app_name="demo",
            user_id=user_id,
            session_id=sid,
            state={"conversation_history": []},
        )

    outgoing = types.Content(role='user', parts=[types.Part(text=message)])

    answer = ""
    # Stop at the first final response event.
    async for event in runner.run_async(user_id=user_id, session_id=sid, new_message=outgoing):
        if event.is_final_response():
            if event.content and event.content.parts:
                answer = event.content.parts[0].text
            break
    return answer
# Test the memory
if __name__ == "__main__":
async def test():
user_id = "test_user"
messages = ["My name is Alice", "What's my name?", "I love pizza", "What do I love?"]
for msg in messages:
print(f"\nUser: {msg}")
response = await chat(user_id, msg)
print(f"Assistant: {response}")
asyncio.run(test()) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/5_memory_agent/5_1_in_memory_conversation_agent/app.py | ai_agent_framework_crash_course/google_adk_crash_course/5_memory_agent/5_1_in_memory_conversation_agent/app.py | import streamlit as st
import asyncio
from agent import chat
# Page configuration
st.set_page_config(page_title="In-Memory Agent", page_icon="🧠")
# Title
st.title("🧠 In-Memory Conversation Agent")
st.markdown("Simple demo of `InMemorySessionService` - agent remembers conversations within a session.")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Say something..."):
# Add user message
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
# Get response
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = asyncio.run(chat("demo_user", prompt))
st.markdown(response)
# Add assistant response
st.session_state.messages.append({"role": "assistant", "content": response})
# Clear button
if st.button("Clear Chat"):
st.session_state.messages = []
st.rerun()
# Sidebar info
with st.sidebar:
st.header("ℹ️ How it works")
st.markdown("""
1. **Session Creation**: Creates session for user
2. **State Storage**: Saves conversation history
3. **Memory Retrieval**: Uses previous context
4. **Temporary**: Lost when app restarts
**Test**: Tell it your name, then ask "What's my name?"
""") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/5_memory_agent/5_2_persistent_conversation_agent/agent.py | ai_agent_framework_crash_course/google_adk_crash_course/5_memory_agent/5_2_persistent_conversation_agent/agent.py | import asyncio
import os
from google.adk.agents import LlmAgent
from google.adk.sessions import DatabaseSessionService
from google.adk.runners import Runner
from google.genai import types
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Create database session service for persistent storage
session_service = DatabaseSessionService(
db_url="sqlite:///sessions.db"
)
# Create a simple agent with persistent memory
agent = LlmAgent(
name="persistent_agent",
model="gemini-3-flash-preview",
description="A simple agent that remembers conversations in a database",
instruction="You are a helpful assistant. Remember what users tell you and reference it in future conversations. Your memory persists across program restarts."
)
# Create runner with database session service
runner = Runner(
agent=agent,
app_name="demo",
session_service=session_service
)
async def chat(user_id: str, message: str) -> str:
    """Send *message* to the database-backed agent and return its reply.

    Conversation state is stored via DatabaseSessionService (SQLite), so the
    agent's memory survives process restarts.
    """
    session_id = f"session_{user_id}"

    existing = await session_service.get_session(
        app_name="demo",
        user_id=user_id,
        session_id=session_id,
    )
    if not existing:
        # First contact for this user: persist a fresh session row.
        await session_service.create_session(
            app_name="demo",
            user_id=user_id,
            session_id=session_id,
            state={"conversation_history": []},
        )

    payload = types.Content(role='user', parts=[types.Part(text=message)])

    reply = ""
    # Stream events and capture the first final response.
    async for event in runner.run_async(user_id=user_id, session_id=session_id, new_message=payload):
        if event.is_final_response():
            if event.content and event.content.parts:
                reply = event.content.parts[0].text
            break
    return reply
# Test the persistent memory
if __name__ == "__main__":
async def test():
# Initialize database
await session_service.initialize()
print("✅ Database initialized")
user_id = "test_user"
messages = ["My name is Bob", "What's my name?", "I love coding", "What do I love?"]
for msg in messages:
print(f"\nUser: {msg}")
response = await chat(user_id, msg)
print(f"Assistant: {response}")
asyncio.run(test()) | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/google_adk_crash_course/5_memory_agent/5_2_persistent_conversation_agent/app.py | ai_agent_framework_crash_course/google_adk_crash_course/5_memory_agent/5_2_persistent_conversation_agent/app.py | import streamlit as st
import asyncio
from agent import chat, session_service
# Page configuration
st.set_page_config(page_title="Persistent Agent", page_icon="🗄️")
# Title
st.title("🗄️ Persistent Conversation Agent")
st.markdown("Simple demo of `DatabaseSessionService` - agent remembers conversations across program restarts using SQLite database.")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Say something..."):
# Add user message
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
# Get response
with st.chat_message("assistant"):
with st.spinner("Thinking with persistent memory..."):
response = asyncio.run(chat("demo_user", prompt))
st.markdown(response)
# Add assistant response
st.session_state.messages.append({"role": "assistant", "content": response})
# Clear button
if st.button("Clear Chat"):
st.session_state.messages = []
st.rerun()
# Sidebar info
with st.sidebar:
st.header("ℹ️ How it works")
st.markdown("""
1. **Database Storage**: Uses SQLite database (sessions.db)
2. **Persistent Memory**: Survives program restarts
3. **Cross-Session**: Remembers across multiple sessions
4. **Simple State**: Basic conversation history
**Test**: Tell it your name, restart the app, ask "What's my name?"
**Database**: Check `sessions.db` file in project directory
""")
st.markdown("---")
st.markdown("### 🗄️ Database Info")
st.markdown("""
**File:** `sessions.db`
**Type:** SQLite database
**Persistence:** Survives restarts
**Location:** Project directory
""") | python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/mcp_ai_agents/ai_travel_planner_mcp_agent_team/app.py | mcp_ai_agents/ai_travel_planner_mcp_agent_team/app.py | import re
import asyncio
from textwrap import dedent
from agno.agent import Agent
from agno.run.agent import RunOutput
from agno.tools.mcp import MultiMCPTools
from agno.tools.googlesearch import GoogleSearchTools
from agno.models.openai import OpenAIChat
from icalendar import Calendar, Event
from datetime import datetime, timedelta
import streamlit as st
from datetime import date
import os
def generate_ics_content(plan_text: str, start_date: datetime = None) -> bytes:
    """
    Generate an ICS calendar file from a travel itinerary text.

    Args:
        plan_text: The travel itinerary text; day sections are detected by
            "Day N" headings.
        start_date: Optional start date for the itinerary (defaults to today)

    Returns:
        bytes: The ICS file content as bytes
    """
    cal = Calendar()
    cal.add('prodid', '-//AI Travel Planner//github.com//')
    cal.add('version', '2.0')

    if start_date is None:
        start_date = datetime.today()

    # Split the plan into days; DOTALL lets a day's content span multiple lines.
    day_pattern = re.compile(r'Day (\d+)[:\s]+(.*?)(?=Day \d+|$)', re.DOTALL)
    days = day_pattern.findall(plan_text)

    if not days:  # If no day pattern found, create a single all-day event with the entire content
        event = Event()
        event.add('summary', "Travel Itinerary")
        event.add('description', plan_text)
        event.add('dtstart', start_date.date())
        # RFC 5545: DTEND is exclusive, so an all-day event must end on the
        # NEXT day; dtend == dtstart would yield a zero-length event.
        event.add('dtend', start_date.date() + timedelta(days=1))
        event.add("dtstamp", datetime.now())
        cal.add_component(event)
    else:
        # Process each day as one all-day event offset from the start date.
        for day_num, day_content in days:
            day_num = int(day_num)
            current_date = start_date + timedelta(days=day_num - 1)

            event = Event()
            event.add('summary', f"Day {day_num} Itinerary")
            event.add('description', day_content.strip())
            # Make it an all-day event (exclusive DTEND = next calendar day).
            event.add('dtstart', current_date.date())
            event.add('dtend', current_date.date() + timedelta(days=1))
            event.add("dtstamp", datetime.now())
            cal.add_component(event)

    return cal.to_ical()
async def run_mcp_travel_planner(destination: str, num_days: int, preferences: str, budget: int, openai_key: str, google_maps_key: str):
    """Run the MCP-based travel planner agent with real-time data access.

    Args:
        destination: Target city/region for the trip.
        num_days: Trip length in days.
        preferences: Free-form traveller preferences.
        budget: Total trip budget in USD.
        openai_key: OpenAI API key for the planning model.
        google_maps_key: Google Maps API key passed to the location MCP server.

    Returns:
        The generated itinerary text (Markdown).
    """
    # Pre-initialize so the finally clause is safe even if construction fails.
    # Previously, an exception raised before mcp_tools was assigned caused an
    # UnboundLocalError in `finally`, masking the original error.
    mcp_tools = None
    try:
        # Set Google Maps API key environment variable
        os.environ["GOOGLE_MAPS_API_KEY"] = google_maps_key
        # Initialize MCPTools with Airbnb MCP
        mcp_tools = MultiMCPTools(
            [
                "npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt",
                "npx @gongrzhe/server-travelplanner-mcp",
            ],
            env={
                "GOOGLE_MAPS_API_KEY": google_maps_key,
            },
            timeout_seconds=60,
        )
        # Connect to Airbnb MCP server
        await mcp_tools.connect()
        travel_planner = Agent(
            name="Travel Planner",
            role="Creates travel itineraries using Airbnb, Google Maps, and Google Search",
            model=OpenAIChat(id="gpt-4o", api_key=openai_key),
            description=dedent(
                """\
                You are a professional travel consultant AI that creates highly detailed travel itineraries directly without asking questions.
                You have access to:
                🏨 Airbnb listings with real availability and current pricing
                🗺️ Google Maps MCP for location services, directions, distance calculations, and local navigation
                🔍 Web search capabilities for current information, reviews, and travel updates
                ALWAYS create a complete, detailed itinerary immediately without asking for clarification or additional information.
                Use Google Maps MCP extensively to calculate distances between all locations and provide precise travel times.
                If information is missing, use your best judgment and available tools to fill in the gaps.
                """
            ),
            instructions=[
                "IMPORTANT: Never ask questions or request clarification - always generate a complete itinerary",
                "Research the destination thoroughly using all available tools to gather comprehensive current information",
                "Find suitable accommodation options within the budget using Airbnb MCP with real prices and availability",
                "Create an extremely detailed day-by-day itinerary with specific activities, locations, exact timing, and distances",
                "Use Google Maps MCP extensively to calculate distances between ALL locations and provide travel times",
                "Include detailed transportation options and turn-by-turn navigation tips using Google Maps MCP",
                "Research dining options with specific restaurant names, addresses, price ranges, and distance from accommodation",
                "Check current weather conditions, seasonal factors, and provide detailed packing recommendations",
                "Calculate precise estimated costs for EVERY aspect of the trip and ensure recommendations fit within budget",
                "Include detailed information about each attraction: opening hours, ticket prices, best visiting times, and distance from accommodation",
                "Add practical information including local transportation costs, currency exchange, safety tips, and cultural norms",
                "Structure the itinerary with clear sections, detailed timing for each activity, and include buffer time between activities",
                "Use all available tools proactively without asking for permission",
                "Generate the complete, detailed itinerary in one response without follow-up questions"
            ],
            tools=[mcp_tools, GoogleSearchTools()],
            add_datetime_to_context=True,
            markdown=True,
            debug_mode=False,
        )
        # Create the planning prompt
        prompt = f"""
        IMMEDIATELY create an extremely detailed and comprehensive travel itinerary for:
        **Destination:** {destination}
        **Duration:** {num_days} days
        **Budget:** ${budget} USD total
        **Preferences:** {preferences}
        DO NOT ask any questions. Generate a complete, highly detailed itinerary now using all available tools.
        **CRITICAL REQUIREMENTS:**
        - Use Google Maps MCP to calculate distances and travel times between ALL locations
        - Include specific addresses for every location, restaurant, and attraction
        - Provide detailed timing for each activity with buffer time between locations
        - Calculate precise costs for transportation between each location
        - Include opening hours, ticket prices, and best visiting times for all attractions
        - Provide detailed weather information and specific packing recommendations
        **REQUIRED OUTPUT FORMAT:**
        1. **Trip Overview** - Summary, total estimated cost breakdown, detailed weather forecast
        2. **Accommodation** - 3 specific Airbnb options with real prices, addresses, amenities, and distance from city center
        3. **Transportation Overview** - Detailed transportation options, costs, and recommendations
        4. **Day-by-Day Itinerary** - Extremely detailed schedule with:
           - Specific start/end times for each activity
           - Exact distances and travel times between locations (use Google Maps MCP)
           - Detailed descriptions of each location with addresses
           - Opening hours, ticket prices, and best visiting times
           - Estimated costs for each activity and transportation
           - Buffer time between activities for unexpected delays
        5. **Dining Plan** - Specific restaurants with addresses, price ranges, cuisine types, and distance from accommodation
        6. **Detailed Practical Information**:
           - Weather forecast with clothing recommendations
           - Currency exchange rates and costs
           - Local transportation options and costs
           - Safety information and emergency contacts
           - Cultural norms and etiquette tips
           - Communication options (SIM cards, WiFi, etc.)
           - Health and medical considerations
           - Shopping and souvenir recommendations
        Use Airbnb MCP for real accommodation data, Google Maps MCP for ALL distance calculations and location services, and web search for current information.
        Make reasonable assumptions and fill in any gaps with your knowledge.
        Generate the complete, highly detailed itinerary in one response without asking for clarification.
        """
        response: RunOutput = await travel_planner.arun(prompt)
        return response.content
    finally:
        # Only close tools that were actually created/connected.
        if mcp_tools is not None:
            await mcp_tools.close()
def run_travel_planner(destination: str, num_days: int, preferences: str, budget: int, openai_key: str, google_maps_key: str):
    """Bridge the async MCP travel planner into synchronous (Streamlit) code.

    Builds the planner coroutine with the user's trip parameters, drives it
    to completion on a fresh event loop via ``asyncio.run``, and returns the
    planner's result (the itinerary text).
    """
    planner_coro = run_mcp_travel_planner(
        destination,
        num_days,
        preferences,
        budget,
        openai_key,
        google_maps_key,
    )
    return asyncio.run(planner_coro)
# -------------------- Streamlit App --------------------
# Configure the page (must run before any other st.* call in the script)
st.set_page_config(
    page_title="MCP AI Travel Planner",
    page_icon="✈️",
    layout="wide"
)
# Initialize session state so the generated itinerary survives Streamlit reruns
if 'itinerary' not in st.session_state:
    st.session_state.itinerary = None
# Title and description
st.title("✈️ MCP AI Travel Planner")
st.caption("Plan your next adventure with AI Travel Planner using MCP servers for real-time data access")
# Sidebar for API keys — collected as password inputs, never persisted
with st.sidebar:
    st.header("🔑 API Keys Configuration")
    st.warning("⚠️ These services require API keys:")
    openai_api_key = st.text_input("OpenAI API Key", type="password", help="Required for AI planning")
    google_maps_key = st.text_input("Google Maps API Key", type="password", help="Required for location services")
# Check if API keys are provided (both OpenAI and Google Maps are required).
# Truthy only when both text inputs are non-empty; gates the main content below.
api_keys_provided = openai_api_key and google_maps_key
if api_keys_provided:
    st.success("✅ All API keys configured!")
else:
    # Tell the user where to obtain each missing key
    st.warning("⚠️ Please enter both API keys to use the travel planner.")
    st.info("""
    **Required API Keys:**
    - **OpenAI API Key**: https://platform.openai.com/api-keys
    - **Google Maps API Key**: https://console.cloud.google.com/apis/credentials (for location services)
    """)
# Main content (only shown if API keys are provided)
if api_keys_provided:
    # Main input section: destination/duration on the left, budget/date on the right
    st.header("🌍 Trip Details")
    col1, col2 = st.columns(2)
    with col1:
        destination = st.text_input("Destination", placeholder="e.g., Paris, Tokyo, New York")
        num_days = st.number_input("Number of Days", min_value=1, max_value=30, value=7)
    with col2:
        budget = st.number_input("Budget (USD)", min_value=100, max_value=10000, step=100, value=2000)
        start_date = st.date_input("Start Date", min_value=date.today(), value=date.today())
    # Preferences section: free-text description plus optional quick-pick tags
    st.subheader("🎯 Travel Preferences")
    preferences_input = st.text_area(
        "Describe your travel preferences",
        placeholder="e.g., adventure activities, cultural sites, food, relaxation, nightlife...",
        height=100
    )
    # Quick preference buttons
    quick_prefs = st.multiselect(
        "Quick Preferences (optional)",
        ["Adventure", "Relaxation", "Sightseeing", "Cultural Experiences",
         "Beach", "Mountain", "Luxury", "Budget-Friendly", "Food & Dining",
         "Shopping", "Nightlife", "Family-Friendly"],
        help="Select multiple preferences or describe in detail above"
    )
    # Combine free-text and quick-pick preferences into one comma-separated
    # string; fall back to a generic default when the user provided neither.
    all_preferences = []
    if preferences_input:
        all_preferences.append(preferences_input)
    if quick_prefs:
        all_preferences.extend(quick_prefs)
    preferences = ", ".join(all_preferences) if all_preferences else "General sightseeing"
    # Generate button (left column) and calendar download (right column)
    col1, col2 = st.columns([1, 1])
    with col1:
        if st.button("🎯 Generate Itinerary", type="primary"):
            if not destination:
                st.error("Please enter a destination.")
            elif not preferences:
                # NOTE(review): unreachable in practice — `preferences` always
                # falls back to "General sightseeing" above, so it is never falsy.
                st.warning("Please describe your preferences or select quick preferences.")
            else:
                # Build the spinner message; google_maps_key is always truthy
                # here because api_keys_provided requires it.
                tools_message = "🏨 Connecting to Airbnb MCP"
                if google_maps_key:
                    tools_message += " and Google Maps MCP"
                tools_message += ", creating itinerary..."
                with st.spinner(tools_message):
                    try:
                        # Run the blocking planner (sync wrapper over the async MCP flow)
                        response = run_travel_planner(
                            destination=destination,
                            num_days=num_days,
                            preferences=preferences,
                            budget=budget,
                            openai_key=openai_api_key,
                            google_maps_key=google_maps_key or ""
                        )
                        # Store the response in session state so it persists across reruns
                        st.session_state.itinerary = response
                        # Show MCP connection status — a text heuristic: assume the
                        # Airbnb MCP worked if the response mentions Airbnb plus
                        # listing/accommodation wording.
                        if "Airbnb" in response and ("listing" in response.lower() or "accommodation" in response.lower()):
                            st.success("✅ Your travel itinerary is ready with Airbnb data!")
                            st.info("🏨 Used real Airbnb listings for accommodation recommendations")
                        else:
                            st.success("✅ Your travel itinerary is ready!")
                            st.info("📝 Used general knowledge for accommodation suggestions (Airbnb MCP may have failed to connect)")
                    except Exception as e:
                        # Surface the failure to the user rather than crashing the app
                        st.error(f"Error: {str(e)}")
                        st.info("Please try again or check your internet connection.")
    with col2:
        if st.session_state.itinerary:
            # Generate the ICS file anchored at midnight of the chosen start date
            ics_content = generate_ics_content(st.session_state.itinerary, datetime.combine(start_date, datetime.min.time()))
            # Provide the file for download
            st.download_button(
                label="📅 Download as Calendar",
                data=ics_content,
                file_name="travel_itinerary.ics",
                mime="text/calendar"
            )
    # Display the stored itinerary (from this run or a previous rerun)
    if st.session_state.itinerary:
        st.header("📋 Your Travel Itinerary")
        st.markdown(st.session_state.itinerary)
# NOTE(review): the three lines below are dataset-viewer UI residue accidentally
# appended to this Python file; commented out so the module remains importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.