import os
from typing import AsyncGenerator

import gradio as gr
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent


@tool
def get_lat_lng(location_description: str) -> dict[str, float]:
    """Get the latitude and longitude of a location."""
    print(f"Tool: Getting lat/lng for {location_description}")

    # Hard-coded mock lookup; unknown locations fall back to London.
    if "tokyo" in location_description.lower():
        return {"lat": 35.6895, "lng": 139.6917}
    elif "paris" in location_description.lower():
        return {"lat": 48.8566, "lng": 2.3522}
    elif "new york" in location_description.lower():
        return {"lat": 40.7128, "lng": -74.0060}
    else:
        return {"lat": 51.5072, "lng": -0.1276}
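
# A minimal sketch of a real lookup, assuming the free Open-Meteo geocoding
# endpoint (not wired into the demo above; verify the response shape before use):
#
#   import requests
#   resp = requests.get(
#       "https://geocoding-api.open-meteo.com/v1/search",
#       params={"name": location_description, "count": 1},
#       timeout=10,
#   )
#   hit = resp.json()["results"][0]
#   return {"lat": hit["latitude"], "lng": hit["longitude"]}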


@tool
def get_weather(lat: float, lng: float) -> dict[str, str]:
    """Get the weather at a location."""
    print(f"Tool: Getting weather for lat={lat}, lng={lng}")

    # Mock forecast keyed off latitude alone.
    if lat > 45:
        return {"temperature": "15°C", "description": "Cloudy"}
    elif lat > 30:
        return {"temperature": "25°C", "description": "Sunny"}
    else:
        return {"temperature": "30°C", "description": "Very Sunny"}
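
# A sketch of a real version, assuming Open-Meteo's current-weather endpoint
# (field names per its public docs; double-check before relying on them):
#
#   import requests
#   resp = requests.get(
#       "https://api.open-meteo.com/v1/forecast",
#       params={"latitude": lat, "longitude": lng, "current_weather": True},
#       timeout=10,
#   )
#   current = resp.json()["current_weather"]
#   return {"temperature": f"{current['temperature']}°C",
#           "description": f"weather code {current['weathercode']}"}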


async def stream_from_agent(message: str, history: list[dict]) -> AsyncGenerator[str, None]:
    """Process a message through the LangChain agent, yielding intermediate steps as strings."""
    # Rebuild the LangChain history from Gradio's "messages"-format history
    # (the ChatInterface below is created with type="messages").
    lc_messages = []
    for msg in history:
        content = msg.get("content", "")
        if msg["role"] == "user":
            lc_messages.append(HumanMessage(content=content))
        elif msg["role"] == "assistant":
            # Skip the intermediate tool-status strings yielded on earlier turns;
            # only real assistant replies belong in the model's context.
            if isinstance(content, str) and not content.startswith("🛠️ Using") and not content.startswith("Result:"):
                lc_messages.append(AIMessage(content=content))

    lc_messages.append(HumanMessage(content=message))
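
    # For reference, the history format assumed above (Gradio type="messages"):
    #   [{"role": "user", "content": "What's the weather in Paris?"},
    #    {"role": "assistant", "content": "It's 15°C and cloudy."}]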

    llm = ChatOpenAI(temperature=0, model="gpt-4")
    memory = MemorySaver()
    tools = [get_lat_lng, get_weather]
    agent_executor = create_react_agent(llm, tools, checkpointer=memory)

    # Fresh thread id per call: conversation context is already carried in
    # lc_messages, so the checkpointer only needs to persist within this run.
    thread_id = "user_session_" + os.urandom(4).hex()

    full_response = ""
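
    # astream_events (v1) emits typed events; the loop below handles three:
    # "on_chat_model_stream" (token deltas), "on_tool_start", and "on_tool_end".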
    async for chunk in agent_executor.astream_events(
        {"messages": lc_messages},
        config={"configurable": {"thread_id": thread_id}},
        version="v1",
    ):
        event = chunk["event"]
        data = chunk["data"]

        if event == "on_chat_model_stream":
            content = data["chunk"].content
            if content:
                full_response += content
                yield full_response

        elif event == "on_tool_start":
            # The tool's name sits on the event itself, not in its data payload.
            tool_input_str = str(data.get("input", ""))
            yield f"🛠️ Using tool: **{chunk['name']}** with input: `{tool_input_str}`"

        elif event == "on_tool_end":
            tool_output_str = str(data.get("output", ""))
            yield f"Tool **{chunk['name']}** finished.\nResult: `{tool_output_str}`"
            if full_response:
                yield full_response

    # Make sure the final answer, not a tool-status message, is what stays
    # on screen once streaming finishes.
    if full_response:
        yield full_response
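

# Quick manual test of the generator (a sketch; assumes OPENAI_API_KEY is set):
#
#   import asyncio
#
#   async def _demo():
#       async for step in stream_from_agent("What's the weather in Tokyo?", []):
#           print(step)
#
#   asyncio.run(_demo())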
|
| |
|
| | |
| | demo = gr.ChatInterface( |
| | fn=stream_from_agent, |
| | type="messages", |
| | title="🤖 AGent template", |
| | description="Ask about the weather anywhere! Watch as I gather the information step by step.", |
| | cache_examples=False, |
| | save_history=True, |
| | editable=True, |
| | ) |


if __name__ == "__main__":
    if os.getenv("OPENAI_API_KEY"):
        print("OPENAI_API_KEY found.")
    else:
        print("Warning: OPENAI_API_KEY not found in environment variables.")
    demo.launch(debug=True, server_name="0.0.0.0")