# Hugging Face Spaces page banner (scrape artifact, not part of the source):
# Spaces: Sleeping
| """ | |
| Dog Weight Calculator Agent - Strands Agents Version | |
| This is a rewrite of the original Hugging Face OpenAI-based ReAct agent | |
| using Amazon's Strands Agents SDK. The original implementation used a | |
| manual ReAct loop with regex parsing. Strands handles all of this | |
| automatically through its model-driven approach. | |
| Original: Manual ReAct loop with OpenAI GPT-4 | |
| New: Strands Agents with native tool calling | |
| """ | |
import ast       # restricted expression parsing for the calculate tool
import operator  # arithmetic operator table for the calculate tool
import os

import gradio as gr
from strands import Agent, tool
from strands.models.openai import OpenAIModel
| # ============================================================================= | |
| # TOOLS | |
| # ============================================================================= | |
| # In Strands, tools are simply Python functions decorated with @tool. | |
| # The framework automatically extracts the function signature, docstring, | |
| # and type hints to create tool specifications for the LLM. | |
| def calculate(expression: str) -> str: | |
| """ | |
| Evaluate a mathematical expression and return the result. | |
| Args: | |
| expression: A mathematical expression to evaluate (e.g., "4 * 7 / 3", "37 + 20") | |
| Returns: | |
| The result of the calculation as a string | |
| """ | |
| try: | |
| # Using eval for simple math - in production, consider using a safer parser | |
| result = eval(expression) | |
| return str(result) | |
| except Exception as e: | |
| return f"Error evaluating expression: {e}" | |
| def average_dog_weight(breed: str) -> str: | |
| """ | |
| Get the average weight of a dog breed. | |
| Args: | |
| breed: The name of the dog breed (e.g., "Border Collie", "Scottish Terrier", "Toy Poodle") | |
| Returns: | |
| A string describing the average weight of the specified breed | |
| """ | |
| # Normalize the breed name for matching | |
| breed_lower = breed.lower() | |
| if "scottish terrier" in breed_lower: | |
| return "Scottish Terriers average 20 lbs" | |
| elif "border collie" in breed_lower: | |
| return "A Border Collie's average weight is 37 lbs" | |
| elif "toy poodle" in breed_lower: | |
| return "A Toy Poodle's average weight is 7 lbs" | |
| elif "bulldog" in breed_lower: | |
| return "A Bulldog weighs 51 lbs" | |
| elif "labrador" in breed_lower: | |
| return "A Labrador Retriever's average weight is 65 lbs" | |
| elif "german shepherd" in breed_lower: | |
| return "A German Shepherd's average weight is 75 lbs" | |
| elif "golden retriever" in breed_lower: | |
| return "A Golden Retriever's average weight is 65 lbs" | |
| elif "beagle" in breed_lower: | |
| return "A Beagle's average weight is 25 lbs" | |
| elif "chihuahua" in breed_lower: | |
| return "A Chihuahua's average weight is 5 lbs" | |
| elif "great dane" in breed_lower: | |
| return "A Great Dane's average weight is 140 lbs" | |
| else: | |
| return f"I don't have specific data for {breed}. An average dog weighs about 50 lbs" | |
| # ============================================================================= | |
| # SYSTEM PROMPT | |
| # ============================================================================= | |
| # With Strands, we don't need to specify the ReAct format in the prompt. | |
| # The framework handles tool selection and execution automatically. | |
| # We just describe the agent's purpose and behavior. | |
| SYSTEM_PROMPT = """ | |
| You are a helpful assistant that specializes in answering questions about dog weights. | |
| You have access to tools that can: | |
| 1. Look up the average weight of specific dog breeds | |
| 2. Perform mathematical calculations | |
| When a user asks about dog weights: | |
| - Use the average_dog_weight tool to look up breed-specific information | |
| - If they ask about multiple dogs, look up each breed separately | |
| - Use the calculate tool for any math (like adding weights together) | |
| Always provide clear, helpful answers about dog weights. | |
| """.strip() | |
| # ============================================================================= | |
| # AGENT SETUP | |
| # ============================================================================= | |
| def create_agent(): | |
| """ | |
| Create and configure the Strands agent. | |
| The agent can use either: | |
| - OpenAI models (requires OPENAI_API_KEY) | |
| - Amazon Bedrock models (requires AWS credentials, default) | |
| """ | |
| # Check for OpenAI API key | |
| openai_api_key = os.environ.get('OPENAI_API_KEY') | |
| if openai_api_key: | |
| # Use OpenAI if API key is available | |
| model = OpenAIModel( | |
| client_args={"api_key": openai_api_key}, | |
| model_id="gpt-4o", | |
| params={ | |
| "temperature": 0, | |
| "max_tokens": 1024 | |
| } | |
| ) | |
| print("Using OpenAI GPT-4o model") | |
| else: | |
| # Fall back to Bedrock (default in Strands) | |
| # Requires AWS credentials to be configured | |
| model = None # Strands uses Bedrock by default | |
| print("Using Amazon Bedrock (default)") | |
| # Create the agent with our tools | |
| if model: | |
| agent = Agent( | |
| model=model, | |
| system_prompt=SYSTEM_PROMPT, | |
| tools=[calculate, average_dog_weight] | |
| ) | |
| else: | |
| agent = Agent( | |
| system_prompt=SYSTEM_PROMPT, | |
| tools=[calculate, average_dog_weight] | |
| ) | |
| return agent | |
| def query(question: str) -> str: | |
| """ | |
| Process a question using the Strands agent. | |
| Unlike the original implementation that required manual loop management | |
| and regex parsing, Strands handles all of this automatically: | |
| - Tool selection based on the question | |
| - Tool execution | |
| - Multi-step reasoning | |
| - Response generation | |
| Args: | |
| question: The user's question about dog weights | |
| Returns: | |
| The agent's response | |
| """ | |
| try: | |
| # Create a fresh agent for each query | |
| agent = create_agent() | |
| # Invoke the agent - Strands handles the entire agentic loop | |
| result = agent(question) | |
| # Extract the final response | |
| # The result object contains the full conversation and metrics | |
| return str(result) | |
| except Exception as e: | |
| return f"Error processing question: {str(e)}" | |
| # ============================================================================= | |
| # GRADIO INTERFACE | |
| # ============================================================================= | |
| def process_question(question: str) -> str: | |
| """Wrapper function for Gradio interface.""" | |
| return query(question) | |
| # Create the Gradio interface | |
| iface = gr.Interface( | |
| fn=process_question, | |
| inputs=gr.Textbox( | |
| label="Enter your question", | |
| placeholder="e.g., I have 2 dogs, a border collie and a scottish terrier. What is their combined weight?", | |
| lines=3 | |
| ), | |
| outputs=gr.Textbox(label="Answer", lines=5), | |
| title="🐕 Dog Weight Calculator (Strands Agents)", | |
| description=""" | |
| Ask about dog weights or perform calculations! | |
| **Examples:** | |
| - How much does a toy poodle weigh? | |
| - I have 2 dogs, a border collie and a scottish terrier. What is their combined weight? | |
| - What's heavier, a Great Dane or a German Shepherd? | |
| *Powered by Amazon Strands Agents SDK* | |
| """, | |
| examples=[ | |
| ["How much does a toy poodle weigh?"], | |
| ["I have 2 dogs, a border collie and a scottish terrier. What is their combined weight?"], | |
| ["What's the average weight of a Labrador Retriever?"], | |
| ["If I have a Chihuahua and a Great Dane, how much do they weigh together?"], | |
| ], | |
| theme=gr.themes.Soft() | |
| ) | |
| # ============================================================================= | |
| # DEMO / TESTING | |
| # ============================================================================= | |
| def run_demo(): | |
| """Run some demo queries to test the agent.""" | |
| print("\n" + "="*60) | |
| print("STRANDS AGENTS - DOG WEIGHT CALCULATOR DEMO") | |
| print("="*60 + "\n") | |
| test_questions = [ | |
| "How much does a toy poodle weigh?", | |
| "I have 2 dogs, a border collie and a scottish terrier. What is their combined weight?", | |
| ] | |
| for question in test_questions: | |
| print(f"Question: {question}") | |
| print("-" * 40) | |
| answer = query(question) | |
| print(f"Answer: {answer}") | |
| print("\n") | |
| if __name__ == "__main__": | |
| import sys | |
| if "--demo" in sys.argv: | |
| # Run demo mode | |
| run_demo() | |
| else: | |
| # Launch Gradio interface | |
| iface.launch() |