atrmkj committed on
Commit
21dfff9
·
1 Parent(s): fc0882e

added first version of langgraph agent implementation w memory

Browse files
__pycache__/agent.cpython-312.pyc CHANGED
Binary files a/__pycache__/agent.cpython-312.pyc and b/__pycache__/agent.cpython-312.pyc differ
 
agent.py CHANGED
@@ -1,97 +1,243 @@
1
- import anthropic
2
  import os
3
- from dotenv import load_dotenv
4
- from tools.search_tool import search_duckduckgo
5
- from tools.retriever_tool import Retriever
 
 
 
 
6
 
7
- load_dotenv()
 
 
8
 
9
- import logging
 
 
 
 
 
 
 
10
 
11
- logging.basicConfig(
12
- level=logging.DEBUG,
13
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
14
- handlers=[logging.FileHandler("debug.log"), logging.StreamHandler()]
15
- )
16
-
17
- logger = logging.getLogger(__name__)
18
-
19
- client = anthropic.Anthropic(
20
- api_key=os.getenv("ANTHROPIC_API_KEY"),
21
- )
22
- # print("Anthropic API key:", os.getenv("ANTHROPIC_API_KEY"))
23
-
24
- retriever = Retriever(
25
- top_k=3,
26
- similarity_threshold=0.2,
27
- batch_size=8
28
- )
29
-
30
- def call_llm(prompt, model="claude-3-7-sonnet-20250219"):
31
- response = client.messages.create(
32
- model=model,
33
- max_tokens=500,
34
- temperature=0,
35
- system="""You are an expert clinical AI assistant. You must strictly reply in ONLY one of the following formats: TOOL: [Document], TOOL: [Search], or TOOL: [Both].
36
-
37
- For questions about general medical information like recovery times, procedure durations, or standard practices, prefer TOOL: [Search].
38
- For questions about specific medical cases or rare conditions found in the document database, use TOOL: [Document].
39
- For questions that would benefit from both sources, use TOOL: [Both].
40
-
41
- Never explain, never say anything else.""",
42
- messages=[
43
- {"role": "user", "content": f"""Question: "{prompt}"
44
-
45
- Decide the best tool for answering it. Reply exactly with TOOL: [Document], TOOL: [Search], or TOOL: [Both]. No other text."""}
46
- ]
47
- )
48
- return response.content[0].text
49
-
50
- def agent_respond(question):
51
- logger.debug(f"Received question: {question}")
52
 
53
- tool_decision = call_llm(
54
- f"""Decide which tool(s) are needed to answer this question: "{question}".
55
- Available tools:
56
- - Document RAG (for clinical facts)
57
- - Search (for public info)
58
-
59
- Reply in format:
60
- TOOL: [Document/Search/Both/All]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  """
62
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
 
64
- logger.debug(f"Tool decision raw response: '{tool_decision}'")
 
 
 
 
 
 
 
65
 
66
- use_document = "document" in tool_decision.lower()
67
- use_search = "search" in tool_decision.lower()
 
 
 
 
 
 
68
 
69
- logger.debug(f"Parsed decision - Use Document: {use_document}, Use Search: {use_search}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
- results = []
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
- if use_document:
74
- logger.debug("Retrieving from documents...")
75
- try:
76
- doc_info = retriever.query(question)
77
- logger.debug(f"Document retrieval returned {len(doc_info)} characters")
78
- results.append(f"Document info:\n{doc_info}")
79
- except Exception as e:
80
- logger.error(f"Document retrieval error: {e}")
81
- results.append(f"Document retrieval error: {str(e)}")
82
 
83
- if use_search:
84
- logger.debug("Searching web...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  try:
86
- search_info = search_duckduckgo(question)
87
- logger.debug(f"Search returned {len(search_info)} characters")
88
- results.append(f"Search info:\n{search_info}")
 
 
 
 
 
 
 
 
 
 
89
  except Exception as e:
90
- logger.error(f"Search error: {e}")
91
- results.append(f"Search error: {str(e)}")
92
-
93
- if results:
94
- return "\n\n".join(results)
95
- else:
96
- logger.warning("No results from either tool")
97
- return "Could not determine the right tool to use or both tools failed."
 
 
1
  import os
2
+ from typing import Dict, List, Any, Optional
3
+ from langchain_anthropic import ChatAnthropic
4
+ from langgraph.graph import StateGraph, START, END
5
+ from langgraph.checkpoint.memory import MemorySaver
6
+ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
7
+ from typing import TypedDict, List, Optional, Union
8
+ import copy
9
 
10
+ from tools.retriever_tool import DocumentRetriever
11
+ from tools.search_tool import WebSearchTool
12
+ from tools.pdf_tool import PDFProcessor
13
 
14
+ class AgentState(TypedDict):
15
+ """State schema for the agent."""
16
+ messages: List[Union[HumanMessage, AIMessage]]
17
+ query: str
18
+ csv_results: Optional[str]
19
+ web_results: Optional[str]
20
+ pdf_results: Optional[str]
21
+ response: Optional[str]
22
 
23
+ class MedTranscriptAgent:
24
+ def __init__(self, anthropic_api_key: Optional[str] = None, debug: bool = False):
25
+ self.api_key = anthropic_api_key or os.getenv("ANTHROPIC_API_KEY")
26
+ if not self.api_key:
27
+ raise ValueError("Anthropic API key is required")
28
+
29
+ self.llm = ChatAnthropic(
30
+ model="claude-3-7-sonnet-20250219",
31
+ anthropic_api_key=self.api_key,
32
+ temperature=0.1
33
+ )
34
+
35
+ self.doc_retriever = DocumentRetriever()
36
+ self.web_search = WebSearchTool(debug=debug)
37
+ self.pdf_processor = PDFProcessor()
38
+ self.debug = debug
39
+
40
+ self.memory_store = MemorySaver()
41
+
42
+ self.conversation_threads = {}
43
+
44
+ self.graph = self._build_graph()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
+ def _build_graph(self) -> StateGraph:
47
+ """Build the LangGraph workflow for conversational QA"""
48
+
49
+ workflow = StateGraph(AgentState)
50
+
51
+ workflow.add_node("query_router", self._route_query)
52
+ workflow.add_node("document_search", self._perform_doc_search)
53
+ workflow.add_node("web_search", self._perform_web_search)
54
+ workflow.add_node("pdf_search", self._perform_pdf_search)
55
+ workflow.add_node("combine_results", self._generate_response)
56
+
57
+ workflow.add_edge(START, "query_router")
58
+ workflow.add_edge("query_router", "document_search")
59
+ workflow.add_edge("query_router", "web_search")
60
+ workflow.add_edge("query_router", "pdf_search")
61
+ workflow.add_edge("document_search", "combine_results")
62
+ workflow.add_edge("web_search", "combine_results")
63
+ workflow.add_edge("pdf_search", "combine_results")
64
+ workflow.add_edge("combine_results", END)
65
+
66
+ return workflow.compile(checkpointer=self.memory_store)
67
+
68
+ def _route_query(self, state: AgentState) -> Dict[str, Any]:
69
+ """Determine which tool(s) to use for the query"""
70
+
71
+ query = state["query"]
72
+ messages = state.get("messages", [])
73
+
74
+ if self.debug:
75
+ print(f"[Router] Processing query with {len(messages)} existing messages")
76
+
77
+ conversation_history = self._format_conversation_history(messages)
78
+
79
+ routing_prompt = f"""
80
+ You are a medical query router. Your job is to determine whether a query about medical topics should be:
81
+ 1. Answered using document search (for specific patient data or medical transcript information)
82
+ 2. Answered using web search (for general medical knowledge)
83
+ 3. Answered using PDF search (for detailed medical protocol or research documents)
84
+
85
+ Consider the conversation history and the current query when making your decision.
86
+
87
+ Conversation history:
88
+ {conversation_history}
89
+
90
+ Current query: {query}
91
+
92
+ Respond with one or more of: "document", "web", "pdf"
93
  """
94
+
95
+ route = self.llm.invoke(routing_prompt).content.strip().lower()
96
+
97
+ if self.debug:
98
+ print(f"[Router] Decision: {route}")
99
+
100
+ next_steps = []
101
+ if "document" in route:
102
+ next_steps.append("document_search")
103
+ if "web" in route:
104
+ next_steps.append("web_search")
105
+ if "pdf" in route:
106
+ next_steps.append("pdf_search")
107
+
108
+ if not next_steps:
109
+ next_steps = ["document_search", "web_search", "pdf_search"]
110
+
111
+ return {"next": next_steps}
112
 
113
+ def _perform_doc_search(self, state: AgentState) -> Dict[str, Any]:
114
+ """Perform document search and return results"""
115
+ query = state["query"]
116
+ if self.debug:
117
+ print(f"[Document Search] Searching for: {query}")
118
+ results = self.doc_retriever.query(query)
119
+
120
+ return {"csv_results": results}
121
 
122
+ def _perform_web_search(self, state: AgentState) -> Dict[str, Any]:
123
+ """Perform web search and return results"""
124
+ query = state["query"]
125
+ if self.debug:
126
+ print(f"[Web Search] Searching for: {query}")
127
+ results = self.web_search.search(query)
128
+
129
+ return {"web_results": results}
130
 
131
+ def _perform_pdf_search(self, state: AgentState) -> Dict[str, Any]:
132
+ """Perform PDF search and return results"""
133
+ query = state["query"]
134
+ if self.debug:
135
+ print(f"[PDF Search] Searching for: {query}")
136
+ results = self.pdf_processor.search(query)
137
+
138
+ return {"pdf_results": results}
139
+
140
+ def _generate_response(self, state: AgentState) -> Dict[str, Any]:
141
+ """Generate a response based on search results and conversation history"""
142
+ query = state["query"]
143
+ messages = state.get("messages", [])
144
+
145
+ if self.debug:
146
+ print(f"[Generate Response] Processing with {len(messages)} messages in history")
147
+
148
+ csv_results = state.get("csv_results", "No document results available")
149
+ web_results = state.get("web_results", "No web results available")
150
+ pdf_results = state.get("pdf_results", "No PDF results available")
151
+
152
+ conversation_history = self._format_conversation_history(messages)
153
+
154
+ response_prompt = f"""
155
+ You are a helpful medical assistant answering questions about medical transcripts and general medical knowledge.
156
+ You have access to three types of information sources: medical transcripts (CSV), web search results, and PDF documents.
157
+
158
+ Conversation history:
159
+ {conversation_history}
160
+
161
+ Current query: {query}
162
+
163
+ Document search results: {csv_results}
164
+
165
+ Web search results: {web_results}
166
+
167
+ PDF search results: {pdf_results}
168
+
169
+ Based on all available information and your medical knowledge, provide a helpful, accurate, and compassionate response to the query.
170
+ Make sure to consider the conversation history for context and continuity.
171
+ When citing information, clearly indicate the source (Document, Web, or PDF).
172
+ """
173
+
174
+ response = self.llm.invoke(response_prompt).content
175
+
176
+ updated_messages = messages + [
177
+ HumanMessage(content=query),
178
+ AIMessage(content=response)
179
+ ]
180
+
181
+ if self.debug:
182
+ print(f"[Generate Response] History now has {len(updated_messages)} messages")
183
+
184
+ return {
185
+ "response": response,
186
+ "messages": updated_messages
187
+ }
188
 
189
+ def _format_conversation_history(self, messages: List) -> str:
190
+ """Format conversation history for inclusion in prompts"""
191
+ if not messages:
192
+ return "No previous conversation"
193
+
194
+ formatted = []
195
+ for i in range(0, len(messages), 2):
196
+ if i < len(messages):
197
+ user_msg = messages[i].content if i < len(messages) else ""
198
+ ai_msg = messages[i+1].content if i+1 < len(messages) else ""
199
+ formatted.append(f"Human: {user_msg}\nAI: {ai_msg}")
200
+
201
+ return "\n\n".join(formatted)
202
 
203
+ def load_pdf(self, file_path: str) -> str:
204
+ """Load a PDF document into the agent"""
205
+ return self.pdf_processor.load_pdf(file_path)
 
 
 
 
 
 
206
 
207
+ def chat(self, message: str, thread_id: str = "default") -> str:
208
+ """Process a message in a conversation thread"""
209
+ if thread_id in self.conversation_threads:
210
+ messages = self.conversation_threads[thread_id]
211
+ if self.debug:
212
+ print(f"[Chat] Retrieved {len(messages)} messages for thread {thread_id}")
213
+ else:
214
+ messages = []
215
+ if self.debug:
216
+ print(f"[Chat] Started new conversation thread {thread_id}")
217
+
218
+ state = {
219
+ "query": message,
220
+ "messages": copy.deepcopy(messages)
221
+ }
222
+
223
+ if self.debug:
224
+ print(f"[Chat] Processing query with initial state containing {len(state['messages'])} messages")
225
+
226
  try:
227
+ result = self.graph.invoke(
228
+ state,
229
+ config={"configurable": {"thread_id": thread_id}}
230
+ )
231
+
232
+ updated_messages = result.get("messages", [])
233
+
234
+ self.conversation_threads[thread_id] = copy.deepcopy(updated_messages)
235
+
236
+ if self.debug:
237
+ print(f"[Chat] Updated thread {thread_id} with {len(updated_messages)} messages")
238
+
239
+ return result["response"]
240
  except Exception as e:
241
+ error_msg = f"Error processing message: {str(e)}"
242
+ print(f"[ERROR] {error_msg}")
243
+ return error_msg
 
 
 
 
 
agent_v1.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import anthropic
2
+ import os
3
+ from dotenv import load_dotenv
4
+ from tools.search_tool import search_duckduckgo
5
+ from tools.retriever_tool import Retriever
6
+
7
+ load_dotenv()
8
+
9
+ import logging
10
+
11
+ logging.basicConfig(
12
+ level=logging.DEBUG,
13
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
14
+ handlers=[logging.FileHandler("debug.log"), logging.StreamHandler()]
15
+ )
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+ client = anthropic.Anthropic(
20
+ api_key=os.getenv("ANTHROPIC_API_KEY"),
21
+ )
22
+ # print("Anthropic API key:", os.getenv("ANTHROPIC_API_KEY"))
23
+
24
+ retriever = Retriever(
25
+ top_k=3,
26
+ similarity_threshold=0.2,
27
+ batch_size=8
28
+ )
29
+
30
+ def call_llm(prompt, model="claude-3-7-sonnet-20250219"):
31
+ response = client.messages.create(
32
+ model=model,
33
+ max_tokens=500,
34
+ temperature=0,
35
+ system="""You are an expert clinical AI assistant. You must strictly reply in ONLY one of the following formats: TOOL: [Document], TOOL: [Search], or TOOL: [Both].
36
+
37
+ For questions about general medical information like recovery times, procedure durations, or standard practices, prefer TOOL: [Search].
38
+ For questions about specific medical cases or rare conditions found in the document database, use TOOL: [Document].
39
+ For questions that would benefit from both sources, use TOOL: [Both].
40
+
41
+ Never explain, never say anything else.""",
42
+ messages=[
43
+ {"role": "user", "content": f"""Question: "{prompt}"
44
+
45
+ Decide the best tool for answering it. Reply exactly with TOOL: [Document], TOOL: [Search], or TOOL: [Both]. No other text."""}
46
+ ]
47
+ )
48
+ return response.content[0].text
49
+
50
+ def agent_respond(question):
51
+ logger.debug(f"Received question: {question}")
52
+
53
+ tool_decision = call_llm(
54
+ f"""Decide which tool(s) are needed to answer this question: "{question}".
55
+ Available tools:
56
+ - Document RAG (for clinical facts)
57
+ - Search (for public info)
58
+
59
+ Reply in format:
60
+ TOOL: [Document/Search/Both/All]
61
+ """
62
+ )
63
+
64
+ logger.debug(f"Tool decision raw response: '{tool_decision}'")
65
+
66
+ use_document = "document" in tool_decision.lower()
67
+ use_search = "search" in tool_decision.lower()
68
+
69
+ logger.debug(f"Parsed decision - Use Document: {use_document}, Use Search: {use_search}")
70
+
71
+ results = []
72
+
73
+ if use_document:
74
+ logger.debug("Retrieving from documents...")
75
+ try:
76
+ doc_info = retriever.query(question)
77
+ logger.debug(f"Document retrieval returned {len(doc_info)} characters")
78
+ results.append(f"Document info:\n{doc_info}")
79
+ except Exception as e:
80
+ logger.error(f"Document retrieval error: {e}")
81
+ results.append(f"Document retrieval error: {str(e)}")
82
+
83
+ if use_search:
84
+ logger.debug("Searching web...")
85
+ try:
86
+ search_info = search_duckduckgo(question)
87
+ logger.debug(f"Search returned {len(search_info)} characters")
88
+ results.append(f"Search info:\n{search_info}")
89
+ except Exception as e:
90
+ logger.error(f"Search error: {e}")
91
+ results.append(f"Search error: {str(e)}")
92
+
93
+ if results:
94
+ return "\n\n".join(results)
95
+ else:
96
+ logger.warning("No results from either tool")
97
+ return "Could not determine the right tool to use or both tools failed."
app.log ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2025-05-02 18:14:51,276 - sentence_transformers.SentenceTransformer - INFO - Use pytorch device_name: mps
2
+ 2025-05-02 18:14:51,276 - sentence_transformers.SentenceTransformer - INFO - Load pretrained SentenceTransformer: all-MiniLM-L6-v2
3
+ 2025-05-02 18:15:40,513 - sentence_transformers.SentenceTransformer - INFO - Use pytorch device_name: mps
4
+ 2025-05-02 18:15:40,514 - sentence_transformers.SentenceTransformer - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-mpnet-base-v2
5
+ 2025-05-02 18:15:42,134 - __main__ - INFO - Starting Medical Transcript Q&A System...
6
+ 2025-05-02 18:15:42,134 - __main__ - INFO - API Key present: Yes
7
+ 2025-05-02 18:15:42,217 - httpx - INFO - HTTP Request: GET http://127.0.0.1:7861/gradio_api/startup-events "HTTP/1.1 200 OK"
8
+ 2025-05-02 18:15:42,218 - httpx - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
9
+ 2025-05-02 18:15:42,226 - httpx - INFO - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 200 OK"
10
+ 2025-05-02 18:16:27,475 - __main__ - INFO - User message: Was any meniscus tear found in the Arthroscopic Meniscoplasty?
11
+
12
+
13
+ 2025-05-02 18:16:27,475 - __main__ - INFO - Current history length: 0
14
+ 2025-05-02 18:16:27,475 - __main__ - INFO - Current conversation ID: None
15
+ 2025-05-02 18:16:27,578 - __main__ - INFO - Processing user message: Was any meniscus tear found in the Arthroscopic Me...
16
+ 2025-05-02 18:16:27,578 - __main__ - INFO - Created new conversation with ID: f85e94c5-9bb8-43aa-8731-0fe380de9859
17
+ 2025-05-02 18:16:27,578 - __main__ - INFO - Processing message for conversation f85e94c5-9bb8-43aa-8731-0fe380de9859: Was any meniscus tear found in the Arthroscopic Meniscoplasty?
18
+ 2025-05-02 18:16:28,519 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
19
+ 2025-05-02 18:16:29,504 - primp - INFO - response: https://lite.duckduckgo.com/lite/ 200
20
+ 2025-05-02 18:16:36,709 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
21
+ 2025-05-02 18:16:36,714 - __main__ - INFO - Thread f85e94c5-9bb8-43aa-8731-0fe380de9859 now has 2 messages
22
+ 2025-05-02 18:16:53,596 - __main__ - INFO - User message: what are the usual surgeries for these
23
+
24
+ 2025-05-02 18:16:53,596 - __main__ - INFO - Current history length: 2
25
+ 2025-05-02 18:16:53,597 - __main__ - INFO - Current conversation ID: f85e94c5-9bb8-43aa-8731-0fe380de9859
26
+ 2025-05-02 18:16:53,648 - __main__ - INFO - Processing user message: what are the usual surgeries for these...
27
+ 2025-05-02 18:16:53,649 - __main__ - INFO - Processing message for conversation f85e94c5-9bb8-43aa-8731-0fe380de9859: what are the usual surgeries for these
28
+ 2025-05-02 18:16:55,113 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
29
+ 2025-05-02 18:16:55,959 - primp - INFO - response: https://html.duckduckgo.com/html 200
30
+ 2025-05-02 18:17:04,935 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
31
+ 2025-05-02 18:17:04,941 - __main__ - INFO - Thread f85e94c5-9bb8-43aa-8731-0fe380de9859 now has 4 messages
32
+ 2025-05-02 18:21:46,013 - sentence_transformers.SentenceTransformer - INFO - Use pytorch device_name: mps
33
+ 2025-05-02 18:21:46,014 - sentence_transformers.SentenceTransformer - INFO - Load pretrained SentenceTransformer: all-MiniLM-L6-v2
34
+ 2025-05-02 18:22:34,286 - sentence_transformers.SentenceTransformer - INFO - Use pytorch device_name: mps
35
+ 2025-05-02 18:22:34,286 - sentence_transformers.SentenceTransformer - INFO - Load pretrained SentenceTransformer: sentence-transformers/all-mpnet-base-v2
36
+ 2025-05-02 18:22:35,944 - __main__ - INFO - Starting Medical Transcript Q&A System...
37
+ 2025-05-02 18:22:35,944 - __main__ - INFO - API Key present: Yes
38
+ 2025-05-02 18:22:36,024 - httpx - INFO - HTTP Request: GET http://127.0.0.1:7861/gradio_api/startup-events "HTTP/1.1 200 OK"
39
+ 2025-05-02 18:22:36,032 - httpx - INFO - HTTP Request: HEAD http://127.0.0.1:7861/ "HTTP/1.1 200 OK"
40
+ 2025-05-02 18:22:36,069 - httpx - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
41
+ 2025-05-02 18:22:54,999 - __main__ - INFO - User message: What was the diagnosis for the ORIF surgery?
42
+ 2025-05-02 18:22:54,999 - __main__ - INFO - Current history length: 0
43
+ 2025-05-02 18:22:54,999 - __main__ - INFO - Current conversation ID: None
44
+ 2025-05-02 18:22:55,101 - __main__ - INFO - Processing user message: What was the diagnosis for the ORIF surgery?...
45
+ 2025-05-02 18:22:55,101 - __main__ - INFO - Created new conversation with ID: 88e3c12d-a85a-4a5a-b0ec-cfc44290fc60
46
+ 2025-05-02 18:22:55,101 - __main__ - INFO - Processing message for conversation 88e3c12d-a85a-4a5a-b0ec-cfc44290fc60: What was the diagnosis for the ORIF surgery?
47
+ 2025-05-02 18:22:56,413 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
48
+ 2025-05-02 18:22:57,290 - primp - INFO - response: https://html.duckduckgo.com/html 200
49
+ 2025-05-02 18:23:03,557 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
50
+ 2025-05-02 18:23:03,563 - __main__ - INFO - Thread 88e3c12d-a85a-4a5a-b0ec-cfc44290fc60 now has 2 messages
51
+ 2025-05-02 18:23:29,388 - __main__ - INFO - Clearing conversation history
52
+ 2025-05-02 18:23:29,389 - __main__ - INFO - Created new conversation with ID: 465ebf30-3f9c-4fea-a20f-77e2f6ee8d14
53
+ 2025-05-02 18:23:31,781 - __main__ - INFO - User message: Summarize the Knee Arthroplasty surgical procedure.
54
+ 2025-05-02 18:23:31,781 - __main__ - INFO - Current history length: 0
55
+ 2025-05-02 18:23:31,781 - __main__ - INFO - Current conversation ID: 465ebf30-3f9c-4fea-a20f-77e2f6ee8d14
56
+ 2025-05-02 18:23:31,832 - __main__ - INFO - Processing user message: Summarize the Knee Arthroplasty surgical procedure...
57
+ 2025-05-02 18:23:31,833 - __main__ - INFO - Processing message for conversation 465ebf30-3f9c-4fea-a20f-77e2f6ee8d14: Summarize the Knee Arthroplasty surgical procedure.
58
+ 2025-05-02 18:23:33,174 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
59
+ 2025-05-02 18:23:33,993 - primp - INFO - response: https://html.duckduckgo.com/html 200
60
+ 2025-05-02 18:23:47,236 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
61
+ 2025-05-02 18:23:47,243 - __main__ - INFO - Thread 465ebf30-3f9c-4fea-a20f-77e2f6ee8d14 now has 2 messages
62
+ 2025-05-02 18:24:31,630 - __main__ - INFO - Clearing conversation history
63
+ 2025-05-02 18:24:31,630 - __main__ - INFO - Created new conversation with ID: ca5bf619-2a57-426c-8dfc-74736d20c035
64
+ 2025-05-02 18:24:50,632 - __main__ - INFO - User message: What surgery was done in the Knee Arthroplasty sample?
65
+ 2025-05-02 18:24:50,632 - __main__ - INFO - Current history length: 0
66
+ 2025-05-02 18:24:50,632 - __main__ - INFO - Current conversation ID: ca5bf619-2a57-426c-8dfc-74736d20c035
67
+ 2025-05-02 18:24:50,685 - __main__ - INFO - Processing user message: What surgery was done in the Knee Arthroplasty sam...
68
+ 2025-05-02 18:24:50,685 - __main__ - INFO - Processing message for conversation ca5bf619-2a57-426c-8dfc-74736d20c035: What surgery was done in the Knee Arthroplasty sample?
69
+ 2025-05-02 18:24:51,735 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
70
+ 2025-05-02 18:24:53,046 - primp - INFO - response: https://lite.duckduckgo.com/lite/ 200
71
+ 2025-05-02 18:25:00,726 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
72
+ 2025-05-02 18:25:00,731 - __main__ - INFO - Thread ca5bf619-2a57-426c-8dfc-74736d20c035 now has 2 messages
73
+ 2025-05-02 18:25:20,720 - __main__ - INFO - User message: what is the average recovery time for this
74
+ 2025-05-02 18:25:20,720 - __main__ - INFO - Current history length: 2
75
+ 2025-05-02 18:25:20,720 - __main__ - INFO - Current conversation ID: ca5bf619-2a57-426c-8dfc-74736d20c035
76
+ 2025-05-02 18:25:20,822 - __main__ - INFO - Processing user message: what is the average recovery time for this...
77
+ 2025-05-02 18:25:20,822 - __main__ - INFO - Processing message for conversation ca5bf619-2a57-426c-8dfc-74736d20c035: what is the average recovery time for this
78
+ 2025-05-02 18:25:21,720 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
79
+ 2025-05-02 18:25:22,679 - primp - INFO - response: https://html.duckduckgo.com/html 200
80
+ 2025-05-02 18:25:29,706 - httpx - INFO - HTTP Request: POST https://api.anthropic.com/v1/messages "HTTP/1.1 200 OK"
81
+ 2025-05-02 18:25:29,711 - __main__ - INFO - Thread ca5bf619-2a57-426c-8dfc-74736d20c035 now has 4 messages
app.py CHANGED
@@ -1,59 +1,173 @@
 
1
  import gradio as gr
2
- from agent import agent_respond
3
-
4
- def agent_interface(user_question, debug_mode=True):
5
- return agent_respond(user_question)
6
-
7
- custom_css = """
8
- .gradio-container {
9
- max-width: 1400px !important;
10
- margin-left: auto;
11
- margin-right: auto;
12
- }
13
- .output-box {
14
- min-height: 500px !important;
15
- font-size: 16px !important;
16
- }
17
- .input-box {
18
- min-height: 150px !important;
19
- font-size: 16px !important;
20
- }
21
- """
22
-
23
- with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
24
- gr.Markdown("# Medical transcripts QA agent")
25
- gr.Markdown("An agent that uses document retrieval and live web search to answer questions on medical transcripts.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  with gr.Row():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  with gr.Column(scale=1):
29
- user_question = gr.Textbox(
30
- lines=4,
31
- placeholder="Ask a healthcare question...",
32
- elem_classes="input-box",
33
- label="Question"
34
  )
35
- debug_mode = gr.Checkbox(label="Debug Mode", value=True)
36
- submit_btn = gr.Button("Submit")
37
- clear_btn = gr.Button("Clear")
38
-
39
- with gr.Column(scale=2):
40
- output = gr.Textbox(
41
- lines=30,
42
- elem_classes="output-box",
43
- label="Response"
44
  )
45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  submit_btn.click(
47
- fn=agent_interface,
48
- inputs=[user_question, debug_mode],
49
- outputs=output
 
 
 
 
50
  )
51
 
52
  clear_btn.click(
53
- fn=lambda: "",
54
- inputs=None,
55
- outputs=[user_question, output]
56
  )
57
 
58
  if __name__ == "__main__":
 
 
59
  demo.launch()
 
1
+ import os
2
  import gradio as gr
3
+ import tempfile
4
+ import json
5
+ from agent import MedTranscriptAgent
6
+ from dotenv import load_dotenv
7
+ import logging
8
+
9
+ logging.basicConfig(
10
+ level=logging.INFO,
11
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
12
+ handlers=[
13
+ logging.FileHandler("app.log"),
14
+ logging.StreamHandler()
15
+ ]
16
+ )
17
+ logger = logging.getLogger(__name__)
18
+
19
+ load_dotenv()
20
+
21
+ if not os.getenv("ANTHROPIC_API_KEY"):
22
+ logger.error("ANTHROPIC_API_KEY not found in environment variables or .env file")
23
+ raise ValueError("ANTHROPIC_API_KEY is required. Please add it to your .env file.")
24
+
25
+ agent = MedTranscriptAgent(debug=True)
26
+
27
+ conversation_threads = {}
28
+
29
+ def process_message(message, conversation_id=None, pdf_file=None):
30
+ if not conversation_id:
31
+ import uuid
32
+ conversation_id = str(uuid.uuid4())
33
+ conversation_threads[conversation_id] = True
34
+ logger.info(f"Created new conversation with ID: {conversation_id}")
35
+
36
+ if pdf_file is not None:
37
+ temp_dir = tempfile.mkdtemp()
38
+ temp_path = os.path.join(temp_dir, "uploaded.pdf")
39
+
40
+ with open(temp_path, "wb") as f:
41
+ f.write(pdf_file)
42
+
43
+ pdf_id = agent.load_pdf(temp_path)
44
+ logger.info(f"Loaded PDF '{pdf_id}' for conversation {conversation_id}")
45
+
46
+ logger.info(f"Processing message for conversation {conversation_id}: {message}")
47
+ response = agent.chat(message, thread_id=conversation_id)
48
+
49
+ if hasattr(agent, "conversation_threads") and conversation_id in agent.conversation_threads:
50
+ thread_msgs = agent.conversation_threads[conversation_id]
51
+ logger.info(f"Thread {conversation_id} now has {len(thread_msgs)} messages")
52
+
53
+ return response, conversation_id
54
+
55
+ with gr.Blocks(title="Medical Transcript Q&A System") as demo:
56
+ gr.Markdown("# Medical Transcript Q&A System")
57
+ gr.Markdown("Ask questions about medical procedures, treatments, or general medical information.")
58
+
59
+ conversation_id = gr.State(None)
60
 
61
  with gr.Row():
62
+ with gr.Column(scale=3):
63
+ chatbot = gr.Chatbot(
64
+ height=500,
65
+ type="messages",
66
+ avatar_images=("👤", "🩺"),
67
+ label="Conversation"
68
+ )
69
+ msg = gr.Textbox(
70
+ label="Your Question",
71
+ placeholder="Ask a medical question...",
72
+ lines=3
73
+ )
74
+
75
+ with gr.Row():
76
+ submit_btn = gr.Button("Submit", variant="primary", scale=2)
77
+ clear_btn = gr.Button("Clear Conversation", scale=1)
78
+
79
  with gr.Column(scale=1):
80
+ pdf_input = gr.File(
81
+ label="Upload PDF Document (Optional)",
82
+ file_types=[".pdf"],
83
+ type="binary"
 
84
  )
85
+ debug_info = gr.Textbox(
86
+ label="Debug Info",
87
+ visible=True,
88
+ interactive=False,
89
+ lines=5
 
 
 
 
90
  )
91
 
92
+ def user(message, history, conv_id, pdf):
93
+ """Add user message to chat history"""
94
+ logger.info(f"User message: {message}")
95
+ logger.info(f"Current history length: {len(history) if history else 0}")
96
+ logger.info(f"Current conversation ID: {conv_id}")
97
+
98
+ if history is None:
99
+ history = []
100
+ history.append({"role": "user", "content": message})
101
+
102
+ return "", history, conv_id, pdf
103
+
104
+ def bot(history, conv_id, pdf):
105
+ """Process user message and add bot response to chat history"""
106
+ if not history or len(history) == 0:
107
+ return history, conv_id, pdf, "Error: No message to process"
108
+
109
+ user_message = history[-1]["content"]
110
+ logger.info(f"Processing user message: {user_message[:50]}...")
111
+
112
+ try:
113
+ response, new_conv_id = process_message(user_message, conv_id, pdf)
114
+
115
+ history.append({"role": "assistant", "content": response})
116
+
117
+ thread_msg_count = 0
118
+ if hasattr(agent, "conversation_threads") and new_conv_id in agent.conversation_threads:
119
+ thread_msg_count = len(agent.conversation_threads[new_conv_id])
120
+
121
+ debug_text = f"Conversation ID: {new_conv_id}\n"
122
+ debug_text += f"UI Messages: {len(history)}\n"
123
+ debug_text += f"Agent Thread Messages: {thread_msg_count}\n"
124
+ debug_text += f"PDF Uploaded: {'Yes' if pdf else 'No'}\n"
125
+
126
+ return history, new_conv_id, None, debug_text
127
+ except Exception as e:
128
+ error_msg = str(e)
129
+ logger.error(f"Error processing message: {error_msg}")
130
+
131
+ history.append({"role": "assistant", "content": f"Error: {error_msg}"})
132
+ return history, conv_id, None, f"Error occurred: {error_msg}"
133
+
134
+ def clear_conversation():
135
+ """Clear the current conversation and start a new one"""
136
+ logger.info("Clearing conversation history")
137
+
138
+ import uuid
139
+ new_id = str(uuid.uuid4())
140
+ logger.info(f"Created new conversation with ID: {new_id}")
141
+
142
+ return new_id, gr.update(value=None), [], None, f"Started new conversation with ID: {new_id}"
143
+
144
+ msg.submit(
145
+ user,
146
+ [msg, chatbot, conversation_id, pdf_input],
147
+ [msg, chatbot, conversation_id, pdf_input]
148
+ ).then(
149
+ bot,
150
+ [chatbot, conversation_id, pdf_input],
151
+ [chatbot, conversation_id, pdf_input, debug_info]
152
+ )
153
+
154
  submit_btn.click(
155
+ user,
156
+ [msg, chatbot, conversation_id, pdf_input],
157
+ [msg, chatbot, conversation_id, pdf_input]
158
+ ).then(
159
+ bot,
160
+ [chatbot, conversation_id, pdf_input],
161
+ [chatbot, conversation_id, pdf_input, debug_info]
162
  )
163
 
164
  clear_btn.click(
165
+ clear_conversation,
166
+ None,
167
+ [conversation_id, msg, chatbot, pdf_input, debug_info]
168
  )
169
 
170
  if __name__ == "__main__":
171
+ logger.info("Starting Medical Transcript Q&A System...")
172
+ logger.info(f"API Key present: {'Yes' if os.getenv('ANTHROPIC_API_KEY') else 'No'}")
173
  demo.launch()
app_v1.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from agent_v1 import agent_respond
3
+
4
def agent_interface(user_question, debug_mode=True):
    """Gradio adapter: forward the user's question to the agent.

    NOTE(review): `debug_mode` is accepted so the signature matches the UI
    inputs, but it is currently ignored — confirm whether `agent_respond`
    should receive it.
    """
    answer = agent_respond(user_question)
    return answer
6
+
7
+ custom_css = """
8
+ .gradio-container {
9
+ max-width: 1400px !important;
10
+ margin-left: auto;
11
+ margin-right: auto;
12
+ }
13
+ .output-box {
14
+ min-height: 500px !important;
15
+ font-size: 16px !important;
16
+ }
17
+ .input-box {
18
+ min-height: 150px !important;
19
+ font-size: 16px !important;
20
+ }
21
+ """
22
+
23
+ with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
24
+ gr.Markdown("# Medical transcripts QA agent")
25
+ gr.Markdown("An agent that uses document retrieval and live web search to answer questions on medical transcripts.")
26
+
27
+ with gr.Row():
28
+ with gr.Column(scale=1):
29
+ user_question = gr.Textbox(
30
+ lines=4,
31
+ placeholder="Ask a healthcare question...",
32
+ elem_classes="input-box",
33
+ label="Question"
34
+ )
35
+ debug_mode = gr.Checkbox(label="Debug Mode", value=True)
36
+ submit_btn = gr.Button("Submit")
37
+ clear_btn = gr.Button("Clear")
38
+
39
+ with gr.Column(scale=2):
40
+ output = gr.Textbox(
41
+ lines=30,
42
+ elem_classes="output-box",
43
+ label="Response"
44
+ )
45
+
46
+ submit_btn.click(
47
+ fn=agent_interface,
48
+ inputs=[user_question, debug_mode],
49
+ outputs=output
50
+ )
51
+
52
+ clear_btn.click(
53
+ fn=lambda: "",
54
+ inputs=None,
55
+ outputs=[user_question, output]
56
+ )
57
+
58
+ if __name__ == "__main__":
59
+ demo.launch()
requirements.txt CHANGED
@@ -7,3 +7,10 @@ sentence-transformers
7
  pandas
8
  duckduckgo_search
9
  gradio
 
 
 
 
 
 
 
 
7
  pandas
8
  duckduckgo_search
9
  gradio
10
+ ## for langgraph and langchain
11
+ langgraph>=0.0.25
12
+ pypdf>=3.15.1
13
+ langchain>=0.1.0
14
+ langchain-anthropic>=0.1.0
15
+ langchain-community>=0.0.13
16
+ langchain-text-splitters>=0.0.1
run_agent.py CHANGED
@@ -1,4 +1,4 @@
1
- from agent import agent_respond
2
 
3
  def main():
4
  print("Welcome to the Healthcare Assistant!")
 
1
+ from agent_v1 import agent_respond
2
 
3
  def main():
4
  print("Welcome to the Healthcare Assistant!")
tools/__pycache__/pdf_tool.cpython-312.pyc ADDED
Binary file (4.33 kB). View file
 
tools/__pycache__/retriever_tool.cpython-312.pyc CHANGED
Binary files a/tools/__pycache__/retriever_tool.cpython-312.pyc and b/tools/__pycache__/retriever_tool.cpython-312.pyc differ
 
tools/__pycache__/search_tool.cpython-312.pyc CHANGED
Binary files a/tools/__pycache__/search_tool.cpython-312.pyc and b/tools/__pycache__/search_tool.cpython-312.pyc differ
 
tools/pdf_tool.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import List, Dict, Any, Optional
3
+ from pypdf import PdfReader
4
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
5
+ from langchain_community.vectorstores import FAISS
6
+ from langchain_community.embeddings import HuggingFaceEmbeddings
7
+
8
+
9
class PDFProcessor:
    """Load PDF files, chunk their text, and expose semantic search over them.

    Each loaded PDF gets its own FAISS vector store keyed by a document id
    derived from the filename; `search` can query one document or all of
    them at once.
    """

    def __init__(self, debug: bool = False):
        self.debug = debug
        # doc_id -> list of chunked langchain Documents
        self.pdf_docs = {}
        # doc_id -> FAISS vector store built over that document's chunks
        self.vector_stores = {}

        self.embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-mpnet-base-v2"
        )

    def load_pdf(self, file_path: str) -> str:
        """Extract, chunk, embed and index a PDF; return its document id.

        Raises:
            FileNotFoundError: if `file_path` does not exist.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"PDF file not found at {file_path}")

        # Bug fix: use splitext so a dotted filename like "report.v2.pdf"
        # keeps its full stem; split('.')[0] truncated it to "report" and
        # could collide with other documents.
        doc_id = os.path.splitext(os.path.basename(file_path))[0]

        text = ""
        reader = PdfReader(file_path)
        for page in reader.pages:
            # Bug fix: extract_text() can return None (e.g. image-only
            # pages); guard it so the concatenation never raises TypeError.
            text += (page.extract_text() or "") + "\n"

        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1500,
            chunk_overlap=150
        )
        chunks = text_splitter.create_documents([text])

        for i, chunk in enumerate(chunks):
            # NOTE(review): this is a chunk-index heuristic, NOT a real PDF
            # page number — pagination is lost when all pages are joined
            # into one string above. Confirm whether callers rely on it.
            page_num = i // 3
            chunk.metadata["source"] = f"{doc_id}_page_{page_num}"

        self.pdf_docs[doc_id] = chunks

        vector_store = FAISS.from_documents(chunks, self.embeddings)
        self.vector_stores[doc_id] = vector_store

        if self.debug:
            print(f"Loaded PDF {doc_id} with {len(chunks)} chunks")

        return doc_id

    def search(self, query: str, doc_id: Optional[str] = None, k: int = 4) -> str:
        """Similarity-search loaded PDFs and return formatted text snippets.

        Args:
            query: natural-language query to embed and match.
            doc_id: restrict the search to one loaded document; search all
                loaded documents when None.
            k: maximum number of snippets per store (and overall cap when
                searching multiple stores).

        Returns:
            A human-readable string of numbered snippets, or an explanatory
            message when nothing is loaded / found.
        """
        if not self.pdf_docs:
            return "No PDF documents have been loaded yet."

        if doc_id and doc_id not in self.pdf_docs:
            return f"Document with ID {doc_id} not found."

        stores_to_search = [self.vector_stores[doc_id]] if doc_id else list(self.vector_stores.values())

        all_docs = []
        for store in stores_to_search:
            # Cap k at the store size to avoid over-asking small indexes.
            docs = store.similarity_search(query, k=min(k, len(store.index_to_docstore_id)))
            all_docs.extend(docs)

        if len(stores_to_search) > 1:
            # NOTE(review): this keeps the first k hits in store order, not
            # the k globally best — consider similarity_search_with_score
            # and a global sort if cross-document ranking matters.
            all_docs = all_docs[:k]

        if not all_docs:
            return "No relevant information found in the PDF documents."

        results = []
        for i, doc in enumerate(all_docs):
            source = doc.metadata.get("source", "Unknown")
            content = doc.page_content.strip()
            results.append(f"[PDF-{i+1}] {source}:\n{content}\n")

        formatted_results = "\n".join(results)

        if self.debug:
            print(f"PDF search results for query '{query}':")
            print(formatted_results)

        return formatted_results
tools/retriever_tool.py CHANGED
@@ -7,7 +7,7 @@ import os
7
  import time
8
  from sentence_transformers import SentenceTransformer
9
 
10
- class Retriever:
11
  def __init__(self, csv_path="data/mtsamples_surgery.csv", top_k=3, similarity_threshold=0.2, batch_size=8):
12
  self.model = SentenceTransformer('all-MiniLM-L6-v2')
13
  self.dimension = self.model.get_sentence_embedding_dimension()
@@ -77,7 +77,6 @@ class Retriever:
77
  if new_metadata:
78
  self.metadata.extend(new_metadata)
79
 
80
- # Encode and add to index
81
  for i in range(0, len(processed_texts), self.batch_size):
82
  batch = processed_texts[i:i+min(self.batch_size, len(processed_texts)-i)]
83
  batch_embeddings = self.model.encode(batch, show_progress_bar=False)
 
7
  import time
8
  from sentence_transformers import SentenceTransformer
9
 
10
+ class DocumentRetriever:
11
  def __init__(self, csv_path="data/mtsamples_surgery.csv", top_k=3, similarity_threshold=0.2, batch_size=8):
12
  self.model = SentenceTransformer('all-MiniLM-L6-v2')
13
  self.dimension = self.model.get_sentence_embedding_dimension()
 
77
  if new_metadata:
78
  self.metadata.extend(new_metadata)
79
 
 
80
  for i in range(0, len(processed_texts), self.batch_size):
81
  batch = processed_texts[i:i+min(self.batch_size, len(processed_texts)-i)]
82
  batch_embeddings = self.model.encode(batch, show_progress_bar=False)
tools/search_tool.py CHANGED
@@ -1,16 +1,27 @@
1
  from duckduckgo_search import DDGS
2
 
3
 
4
- def search_duckduckgo(query, max_results=3):
5
- """Perform a DuckDuckGo search for the given query."""
6
- results = []
7
- try:
8
- with DDGS() as ddgs:
9
- for r in ddgs.text(query, max_results=max_results):
10
- results.append(f"Title: {r.get('title', 'No title')}\nSource: {r.get('href', 'No source')}\n{r['body']}")
11
- if results:
12
- return "\n\n".join(results)
13
- else:
14
- return "No relevant information found."
15
- except Exception as e:
16
- return f"Search error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
1
  from duckduckgo_search import DDGS
2
 
3
 
4
class WebSearchTool:
    """Thin wrapper around DuckDuckGo text search with a stable `search` API."""

    def __init__(self, debug=False):
        # When True, print each query before searching.
        self.debug = debug

    def search_duckduckgo(self, query, max_results=3):
        """Run a DuckDuckGo text search and return formatted results.

        Returns one "Title/Source/body" section per hit joined by blank
        lines, a fallback message when nothing is found, or an error string
        on failure — this method never raises.
        """
        results = []
        try:
            with DDGS() as ddgs:
                for r in ddgs.text(query, max_results=max_results):
                    # Bug fix: use .get for 'body' like the other keys — a
                    # hit missing 'body' previously raised KeyError and
                    # aborted the whole search.
                    results.append(
                        f"Title: {r.get('title', 'No title')}\n"
                        f"Source: {r.get('href', 'No source')}\n"
                        f"{r.get('body', '')}"
                    )
            if results:
                return "\n\n".join(results)
            else:
                return "No relevant information found."
        except Exception as e:
            return f"Search error: {str(e)}"

    def search(self, query, max_results=3):
        """Interface method that matches the expected API in agent.py"""
        if self.debug:
            print(f"Searching for: {query}")
        return self.search_duckduckgo(query, max_results)