Rashmi0801 commited on
Commit
92acfce
·
verified ·
1 Parent(s): 38c5157

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -50
app.py CHANGED
@@ -7,7 +7,10 @@ from langchain.callbacks import StreamlitCallbackHandler
7
  import os
8
  from dotenv import load_dotenv
9
 
10
- # Used the inbuilt tools of Arxiv and Wikipedia
 
 
 
11
  api_wrapper_arxiv = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=250)
12
  arxiv = ArxivQueryRun(api_wrapper=api_wrapper_arxiv)
13
 
@@ -15,68 +18,39 @@ api_wrapper_wiki = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=25
15
  wiki = WikipediaQueryRun(api_wrapper=api_wrapper_wiki)
16
 
17
  search = DuckDuckGoSearchRun(name="Search")
 
 
 
 
 
 
 
 
 
 
 
18
 
 
19
  st.title("Langchain - Chat with Search")
20
- """
21
- In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
22
- Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
23
- """
24
 
25
- # Sidebar for settings
26
  st.sidebar.title("Settings")
27
- api_key = st.sidebar.text_input("Enter your Groq API Key:", type="password")
 
 
28
 
29
  if "messages" not in st.session_state:
30
- st.session_state["messages"] = [
31
- {"role":"assistant", "content":"Hi, I am a Chatbot who can search the web. How can I help you ?"}
32
- ]
33
 
34
  for msg in st.session_state.messages:
35
  st.chat_message(msg["role"]).write(msg["content"])
36
 
37
- if prompt:=st.chat_input(placeholder="What is machine learning ?"):
38
- st.session_state.messages.append({"role":"user", "content":prompt})
39
  st.chat_message("user").write(prompt)
40
 
41
- llm = ChatGroq(groq_api_key=api_key, model_name="Llama3-8b-8192", streaming=True)
42
- tools = [search, arxiv, wiki]
43
-
44
- search_agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True)
45
-
46
  with st.chat_message("assistant"):
47
  st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
48
- response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
49
- st.session_state.messages.append({'role':'assistant', "content":response})
50
  st.write(response)
51
-
52
- # --- Build Gradio Interface using Blocks ---
53
- with gr.Blocks() as demo:
54
- gr.Markdown("# Basic Agent Evaluation Runner")
55
- gr.Markdown(
56
- """
57
- **Instructions:**
58
- 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
59
- 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
60
- 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
61
- ---
62
- **Disclaimers:**
63
- Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
64
- This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
65
- """
66
- )
67
-
68
- gr.LoginButton()
69
-
70
- run_button = gr.Button("Run Evaluation & Submit All Answers")
71
-
72
- status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
73
- # Removed max_rows=10 from DataFrame constructor
74
- results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
75
-
76
- run_button.click(
77
- fn=run_and_submit_all,
78
- outputs=[status_output, results_table]
79
- )
80
-
81
-
82
 
 
7
  import os
8
  from dotenv import load_dotenv
9
 
10
+ # --- Load API keys (optional if you're using a .env) ---
11
+ load_dotenv()
12
+
13
+ # --- Tool setup ---
14
  api_wrapper_arxiv = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=250)
15
  arxiv = ArxivQueryRun(api_wrapper=api_wrapper_arxiv)
16
 
 
18
  wiki = WikipediaQueryRun(api_wrapper=api_wrapper_wiki)
19
 
20
  search = DuckDuckGoSearchRun(name="Search")
21
+ tools = [search, arxiv, wiki]
22
+
23
+ # --- Model setup ---
24
+ api_key = os.getenv("GROQ_API_KEY", "") # or let user input via Streamlit
25
+ llm = ChatGroq(groq_api_key=api_key, model_name="Llama3-8b-8192", streaming=True)
26
+ search_agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True)
27
+
28
+ # -------- GAIA REQUIRED FUNCTION --------
29
+ def run_agent(query: str) -> str:
30
+ """GAIA-compatible agent runner."""
31
+ return search_agent.run(query)
32
 
33
+ # -------- Streamlit App --------
34
  st.title("Langchain - Chat with Search")
 
 
 
 
35
 
 
36
  st.sidebar.title("Settings")
37
+ user_api_key = st.sidebar.text_input("Enter your Groq API Key:", type="password")
38
+ if user_api_key:
39
+ llm.groq_api_key = user_api_key
40
 
41
  if "messages" not in st.session_state:
42
+ st.session_state["messages"] = [{"role": "assistant", "content": "Hi, I am a Chatbot who can search the web. How can I help you?"}]
 
 
43
 
44
  for msg in st.session_state.messages:
45
  st.chat_message(msg["role"]).write(msg["content"])
46
 
47
+ if prompt := st.chat_input(placeholder="What is machine learning?"):
48
+ st.session_state.messages.append({"role": "user", "content": prompt})
49
  st.chat_message("user").write(prompt)
50
 
 
 
 
 
 
51
  with st.chat_message("assistant"):
52
  st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
53
+ response = run_agent(prompt)
54
+ st.session_state.messages.append({"role": "assistant", "content": response})
55
  st.write(response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56