RohitKeswani committed on
Commit
1b85a7a
·
1 Parent(s): 463e95a

adding agent

Browse files
Files changed (2) hide show
  1. app.py +22 -12
  2. search_agent.py +13 -0
app.py CHANGED
@@ -1,10 +1,12 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
-
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
8
 
9
 
10
  def respond(
@@ -25,19 +27,27 @@ def respond(
25
 
26
  messages.append({"role": "user", "content": message})
27
 
28
- response = ""
 
 
 
 
 
 
 
 
 
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
 
39
- response += token
40
- yield response
 
 
 
 
 
41
 
42
 
43
  """
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ from langgraph.prebuilt import create_react_agent
4
+ from search_agent import tools
5
  """
6
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
  """
8
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
+ graph = create_react_agent(client, tools=tools)
10
 
11
 
12
  def respond(
 
27
 
28
  messages.append({"role": "user", "content": message})
29
 
30
+ # response = ""
31
+
32
+ # for message in client.chat_completion(
33
+ # messages,
34
+ # max_tokens=max_tokens,
35
+ # stream=True,
36
+ # temperature=temperature,
37
+ # top_p=top_p,
38
+ # ):
39
+ # token = message.choices[0].delta.content
40
 
41
+ # response += token
42
+ # yield response
 
 
 
 
 
 
43
 
44
+ inputs = {"messages": [(role, content) for role, content in messages]}
45
+
46
+ # Get the response from the agent (this integrates your agent with the model)
47
+ agent_response = graph.invoke(inputs) # Process the inputs through your agent
48
+
49
+ # Return the final message from the agent
50
+ return agent_response['messages'][-1][1]
51
 
52
 
53
  """
search_agent.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from langchain_core.tools import tool
from langchain_community.tools import DuckDuckGoSearchRun

# Instantiate the searcher once at import time; DuckDuckGoSearchRun is
# stateless between calls, so re-creating it on every invocation (as the
# original did) only added per-call overhead.
_search_tool = DuckDuckGoSearchRun()


@tool
def get_search_results(query: str) -> str:
    """Fetch web search results for *query* from DuckDuckGo.

    Args:
        query: Free-text search query to submit.

    Returns:
        The result text produced by ``DuckDuckGoSearchRun.invoke`` —
        a plain-string summary of the top hits.
    """
    # NOTE(review): this performs a live network request; failures from the
    # underlying DuckDuckGo backend will propagate to the agent.
    return _search_tool.invoke(query)


# Exported tool list consumed by the agent wiring in app.py
# (create_react_agent(client, tools=tools)).
tools = [get_search_results]