Commit ·
3868053
1
Parent(s): fa91d18
fix import
Browse files- app.py +1 -1
- lang_graph_agent.py +92 -0
- smol_agent.py +8 -0
app.py
CHANGED
|
@@ -3,7 +3,7 @@ import gradio as gr
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
-
import
|
| 7 |
|
| 8 |
# (Keep Constants as is)
|
| 9 |
# --- Constants ---
|
|
|
|
| 3 |
import requests
|
| 4 |
import inspect
|
| 5 |
import pandas as pd
|
| 6 |
+
from lang_graph_agent import LangGraphAgent
|
| 7 |
|
| 8 |
# (Keep Constants as is)
|
| 9 |
# --- Constants ---
|
lang_graph_agent.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import TypedDict, List, Dict, Any, Optional, Annotated
|
| 3 |
+
from langgraph.graph import StateGraph, START, END
|
| 4 |
+
from langchain_openai import ChatOpenAI
|
| 5 |
+
from langchain_core.messages import HumanMessage, AIMessage
|
| 6 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
| 7 |
+
from langchain_core.utils.function_calling import convert_to_openai_function
|
| 8 |
+
from duckduckgo_search import DDGS
|
| 9 |
+
|
| 10 |
+
class AgentState(TypedDict):
    """Shared state dict threaded through the LangGraph workflow nodes."""
    # The user's original question, set once at invocation time.
    question: str
    # Conversation history; initialized empty and not read by the visible nodes.
    messages: List[Any]
    # Output of the search node; None until that node has run.
    search_results: Optional[List[Dict[str, str]]]
    # Output of the answer-generation node; None until that node has run.
    final_answer: Optional[str]
|
| 15 |
+
|
| 16 |
+
def web_search(query: str, num_results: int = 3) -> List[Dict[str, str]]:
    """Perform a web search using DuckDuckGo.

    Args:
        query: The search query string.
        num_results: Maximum number of results to return (default 3).

    Returns:
        A list of dicts with 'title', 'link', and 'snippet' keys
        (the schema consumed by LangGraphAgent.generate_answer).
    """
    with DDGS() as ddgs:
        # BUG FIX: DDGS.text() yields dicts keyed 'title', 'href', 'body' —
        # the original read r['link'], which raised KeyError on every result.
        # .get() keeps a malformed result from aborting the whole search.
        return [
            {
                'title': r.get('title', ''),
                'link': r.get('href', ''),
                'snippet': r.get('body', ''),
            }
            for r in ddgs.text(query, max_results=num_results)
        ]
|
| 27 |
+
|
| 28 |
+
class LangGraphAgent:
    """Question-answering agent built on a two-node LangGraph pipeline:
    a web-search step followed by an LLM answer-generation step."""

    def __init__(self):
        print("LangGraphAgent initialized.")
        # Deterministic (temperature=0) GPT-4 chat model for answer generation.
        self.llm = ChatOpenAI(model="gpt-4", temperature=0)
        self.graph = self._build_graph()

    def _build_graph(self):
        """Build and compile the search -> generate_answer workflow.

        Returns:
            The compiled graph (supports .invoke). NOTE: the raw StateGraph
            builder cannot be invoked, hence the .compile() below.
        """
        workflow = StateGraph(AgentState)

        # Node 1: fetch search results for the question.
        workflow.add_node("search", self.search_step)
        # Node 2: synthesize the final answer from those results.
        workflow.add_node("generate_answer", self.generate_answer)

        # Linear pipeline: search -> generate_answer -> END.
        workflow.set_entry_point("search")
        workflow.add_edge("search", "generate_answer")
        workflow.set_finish_point("generate_answer")

        # BUG FIX: the original returned the uncompiled StateGraph builder,
        # which made self.graph.invoke(...) fail at runtime.
        return workflow.compile()

    def search_step(self, state: AgentState) -> AgentState:
        """Populate state['search_results'] via a web search on the question."""
        state['search_results'] = web_search(state['question'])
        return state

    def generate_answer(self, state: AgentState) -> AgentState:
        """Generate the final answer with the LLM from the search results."""
        prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a helpful AI assistant that provides accurate answers based on search results."),
            ("human", "Question: {question}\n\nSearch Results:\n{search_results}\n\nPlease provide a comprehensive answer based on these search results."),
        ])

        # Guard against a missing/empty result list so the prompt still
        # renders instead of crashing on iteration over None.
        results = state.get('search_results') or []
        formatted_results = '\n'.join(
            f"Title: {r['title']}\nSnippet: {r['snippet']}\nLink: {r['link']}\n"
            for r in results
        )

        response = self.llm.invoke(
            prompt.format_messages(
                question=state['question'],
                search_results=formatted_results,
            )
        )

        state['final_answer'] = response.content
        return state

    def __call__(self, question: str) -> str:
        """Run the compiled graph end-to-end and return the answer string."""
        print(f"Agent received question: {question}")

        # Fresh state per call; nodes fill in the Optional fields.
        state: AgentState = {
            'question': question,
            'messages': [],
            'search_results': None,
            'final_answer': None,
        }

        final_state = self.graph.invoke(state)
        return final_state['final_answer']
|
smol_agent.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class BasicAgent:
    """Minimal placeholder agent: ignores the question and returns a
    fixed canned answer, logging its activity to stdout."""

    def __init__(self):
        # No configuration required; just announce construction.
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Return the fixed answer regardless of *question* content."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        fixed_answer = "This is a new default answer."
        print(f"Agent returning fixed answer: {fixed_answer}")
        return fixed_answer
|