dennis111 committed on
Commit
c542bf3
·
1 Parent(s): 707cf08
Files changed (1) hide show
  1. agent.py +10 -10
agent.py CHANGED
@@ -7,7 +7,7 @@ from langchain.chat_models import init_chat_model
7
 
8
  from langchain_community.document_loaders import WikipediaLoader, ArxivLoader, YoutubeLoader
9
  from langchain_community.tools import TavilySearchResults
10
- from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, AnyMessage
11
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
12
  from langgraph.graph import add_messages, START, END, StateGraph
13
  from langchain_core.tools import tool
@@ -56,7 +56,7 @@ def get_graph(llm):
56
  from langchain_community.retrievers import TavilySearchAPIRetriever
57
 
58
  # Wikipedia retriever
59
- wiki_retriever = WikipediaRetriever(load_max_docs =20)
60
  # Tavily retriever
61
  tavily_retriever = TavilySearchAPIRetriever(k=3)
62
 
@@ -93,7 +93,7 @@ def get_graph(llm):
93
  return a - b
94
 
95
  @tool
96
- def divide(a: int, b: int) -> int:
97
  """Divide two numbers.
98
 
99
  Args:
@@ -125,7 +125,7 @@ def get_graph(llm):
125
  print("The query is: ", query)
126
  docs = wiki_retriever.invoke(query)
127
  serialized = "\n\n".join(
128
- (f"\nContent:\n{doc.page_content}")
129
  for doc in docs
130
  )
131
 
@@ -160,7 +160,7 @@ def get_graph(llm):
160
  f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
161
  for doc in search_docs
162
  ])
163
- return {"wiki_results": formatted_search_docs}
164
 
165
  @tool
166
  def online_search(query: str):
@@ -172,7 +172,7 @@ def get_graph(llm):
172
  # docs = tavily_retriever.invoke(query)
173
  docs = TavilySearchResults(max_results=3).invoke({'query': query})
174
  serialized = "\n\n".join(
175
- (f"\nContent:\n{doc.page_content}")
176
  for doc in docs
177
  )
178
 
@@ -191,7 +191,7 @@ def get_graph(llm):
191
  f'URL: {doc["url"]}\nTitle= {doc["title"]}\nContent: {doc["content"]}'
192
  for doc in search_docs
193
  ])
194
- return {"web_results": formatted_search_docs}
195
 
196
  @tool
197
  def arvix_search(query: str) -> str:
@@ -206,7 +206,7 @@ def get_graph(llm):
206
  f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
207
  for doc in search_docs
208
  ])
209
- return {"arvix_results": formatted_search_docs}
210
 
211
  @tool
212
  def youtube_transcript(url: str) -> str:
@@ -220,10 +220,10 @@ def get_graph(llm):
220
  )
221
  docs = loader.load()
222
  transcript = "\n\n".join(
223
- (f"\nContent:\n{doc.page_content}")
224
  for doc in docs
225
  )
226
- return {"youtube_transcript": transcript}
227
 
228
 
229
 
 
7
 
8
  from langchain_community.document_loaders import WikipediaLoader, ArxivLoader, YoutubeLoader
9
  from langchain_community.tools import TavilySearchResults
10
+ from langchain_core.messages import HumanMessage
11
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
12
  from langgraph.graph import add_messages, START, END, StateGraph
13
  from langchain_core.tools import tool
 
56
  from langchain_community.retrievers import TavilySearchAPIRetriever
57
 
58
  # Wikipedia retriever
59
+ wiki_retriever = WikipediaRetriever()
60
  # Tavily retriever
61
  tavily_retriever = TavilySearchAPIRetriever(k=3)
62
 
 
93
  return a - b
94
 
95
  @tool
96
+ def divide(a: int, b: int) -> float:
97
  """Divide two numbers.
98
 
99
  Args:
 
125
  print("The query is: ", query)
126
  docs = wiki_retriever.invoke(query)
127
  serialized = "\n\n".join(
128
+ f"\nContent:\n{doc.page_content}"
129
  for doc in docs
130
  )
131
 
 
160
  f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
161
  for doc in search_docs
162
  ])
163
+ return formatted_search_docs
164
 
165
  @tool
166
  def online_search(query: str):
 
172
  # docs = tavily_retriever.invoke(query)
173
  docs = TavilySearchResults(max_results=3).invoke({'query': query})
174
  serialized = "\n\n".join(
175
+ f"\nContent:\n{doc.page_content}"
176
  for doc in docs
177
  )
178
 
 
191
  f'URL: {doc["url"]}\nTitle= {doc["title"]}\nContent: {doc["content"]}'
192
  for doc in search_docs
193
  ])
194
+ return formatted_search_docs
195
 
196
  @tool
197
  def arvix_search(query: str) -> str:
 
206
  f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
207
  for doc in search_docs
208
  ])
209
+ return formatted_search_docs
210
 
211
  @tool
212
  def youtube_transcript(url: str) -> str:
 
220
  )
221
  docs = loader.load()
222
  transcript = "\n\n".join(
223
+ f"\nContent:\n{doc.page_content}"
224
  for doc in docs
225
  )
226
+ return transcript
227
 
228
 
229