shubhamgs commited on
Commit
c4fc446
·
verified ·
1 Parent(s): c520630

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +79 -0
  2. deep_research_system.py +111 -0
app.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from deep_research_system import run_deep_research_system
import traceback
import streamlit.components.v1 as components

# Page configuration must be the first Streamlit call.
st.set_page_config(page_title="Deep Research AI", layout="centered")

# Title and usage instructions.
st.title("Deep Research AI Agentic System")
st.write("Enter a question below to get the latest insights from web research. Click 'Reset' to start over.")

# Seed session-state defaults on first run; setdefault leaves any
# already-present values untouched on later reruns.
_SESSION_DEFAULTS = {
    "show_reset_button": False,
    "question": "",
    "reset_triggered": False,
}
for _key, _value in _SESSION_DEFAULTS.items():
    st.session_state.setdefault(_key, _value)
# JavaScript to clear the input field.
# NOTE(review): components.html renders inside an iframe, so the script must
# reach through window.parent to touch the main document; it also assumes the
# widget's aria-label stays exactly "Your Question" — confirm if the label
# text ever changes, since the selector silently matches nothing otherwise.
clear_input_js = """
<script>
const input = window.parent.document.querySelector('input[aria-label="Your Question"]');
if (input) {
    input.value = '';
}
</script>
"""
30
+
# Collect the question inside a form so the text input and the submit
# button are handled atomically in a single rerun.
with st.form(key="question_form"):
    entered_question = st.text_input(
        "Your Question",
        value=st.session_state.question,
        placeholder="e.g., What are the latest advancements in quantum computing?",
        key="question_input",
    )
    st.session_state.question = entered_question
    submit_button = st.form_submit_button("Get Answer")
# Handle a form submission: run the research pipeline and show the result.
if submit_button:
    question = st.session_state.question
    if not question:
        # Nothing to research; ask the user for input.
        st.warning("Please enter a question!")
    else:
        st.write(f"Research Agent: Searching for '{question}'...")
        try:
            with st.spinner("Gathering research data..."):
                answer = run_deep_research_system(question)
            st.write("Research Agent: Found 5 relevant sources.")
            st.write("Answer Drafter Agent: Drafted the final answer.")
            st.write("**Final Answer:**")
            st.write(answer)
        except Exception as e:
            # Surface the full traceback in the UI for debugging.
            st.error(f"An error occurred: {str(e)}\n{traceback.format_exc()}")
        finally:
            # Offer the Reset button after both success and failure.
            st.session_state.show_reset_button = True
    # Reset the trigger flag after submission.
    st.session_state.reset_triggered = False
60
+
# Reset all question-related state so the UI starts fresh on the next rerun.
def clear_input():
    """Clear the stored question and widget state, then arm the JS input-clearer."""
    st.session_state.show_reset_button = False
    st.session_state.question = ""
    # Drop the widget-backed keys so Streamlit recreates the widgets empty.
    for stale_key in ("question_input", "question_form"):
        st.session_state.pop(stale_key, None)
    st.session_state.reset_triggered = True
68
+
69
+ # Show Reset button only if show_reset_button is True
70
+ if st.session_state.show_reset_button:
71
+ if st.button("Reset"):
72
+ # Clear the input and reset state
73
+ clear_input()
74
+ # Refresh the webpage
75
+ st.rerun()
76
+
77
+ # Execute JavaScript to clear the input field if reset was triggered
78
+ if st.session_state.reset_triggered:
79
+ components.html(clear_input_js, height=0)
deep_research_system.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
from typing import Dict, TypedDict, List
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from tavily import TavilyClient

# SECURITY: API keys must never be hard-coded in source control. The keys
# previously committed in this file are publicly exposed and must be revoked
# immediately. Supply credentials via the environment instead (e.g. shell
# exports, a .env file loaded by the platform, or Space secrets).
for _required_key in ("OPENAI_API_KEY", "TAVILY_API_KEY"):
    if not os.environ.get(_required_key):
        raise RuntimeError(
            f"Missing required environment variable: {_required_key}"
        )

# LLM used by the answer-drafter agent; temperature=0 for deterministic drafts.
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

# Tavily client used by the research agent for web search.
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
# Define the shared state for the agents
class ResearchState(TypedDict):
    # The user's research question, set once when the workflow is invoked.
    query: str
    # Search hits gathered by research_agent: dicts with "title", "url",
    # and "content" keys.
    research_data: List[Dict]
    # Prose answer produced by answer_drafter_agent.
    final_answer: str
# Research Agent: Crawls the web and gathers information
def research_agent(state: ResearchState) -> ResearchState:
    """Search the web for the query and collect up to five sources.

    Returns a partial state update containing ``research_data``: a list of
    dicts with ``title``, ``url`` and ``content`` (truncated to 500 chars).
    """
    query = state["query"]
    print(f"Research Agent: Searching for '{query}'...")

    # Use Tavily to search the web.
    search_results = tavily_client.search(query, max_results=5)

    # Be defensive about the response shape: a missing "results" list or a
    # hit without title/url/content becomes an empty value instead of
    # crashing the whole workflow with a KeyError.
    research_data = [
        {
            "title": result.get("title", ""),
            "url": result.get("url", ""),
            "content": result.get("content", "")[:500],  # limit length for brevity
        }
        for result in search_results.get("results", [])
    ]

    print(f"Research Agent: Found {len(research_data)} relevant sources.")
    return {"research_data": research_data}
# Answer Drafter Agent: turns the gathered sources into a final answer.
def answer_drafter_agent(state: ResearchState) -> ResearchState:
    """Draft a 3-5 sentence answer to the query from the research data."""
    query = state["query"]
    research_data = state["research_data"]

    # Prompt for the drafting LLM; {query} and {research_data} are filled below.
    prompt = ChatPromptTemplate.from_template(
        """
        You are an expert at drafting concise and accurate answers. Based on the following research data, provide a clear and informative response to the query: "{query}".

        Research Data:
        {research_data}

        Provide a well-structured answer in 3-5 sentences, citing the sources where relevant.
        """
    )

    # Render each source as a single bullet line for the prompt.
    source_lines = [
        f"- {item['title']}: {item['content']} (Source: {item['url']})"
        for item in research_data
    ]
    research_text = "\n".join(source_lines)

    # Pipe the prompt into the LLM and generate the final answer.
    response = (prompt | llm).invoke({"query": query, "research_data": research_text})

    print("Answer Drafter Agent: Drafted the final answer.")
    return {"final_answer": response.content}
# Build the two-agent LangGraph pipeline: research -> draft -> END.
def create_workflow():
    """Compile and return the research/draft workflow graph."""
    graph = StateGraph(ResearchState)

    # Register each agent as a node.
    graph.add_node("research_agent", research_agent)
    graph.add_node("answer_drafter_agent", answer_drafter_agent)

    # Linear flow: start at research, hand off to drafting, then finish.
    graph.set_entry_point("research_agent")
    graph.add_edge("research_agent", "answer_drafter_agent")
    graph.add_edge("answer_drafter_agent", END)

    return graph.compile()
# Main function to run the system
def run_deep_research_system(query: str) -> str:
    """Run the full research workflow for *query* and return the drafted answer."""
    workflow_app = create_workflow()

    # Every state field starts empty except the query itself.
    final_state = workflow_app.invoke(
        {"query": query, "research_data": [], "final_answer": ""}
    )
    return final_state["final_answer"]
# Example usage: run a demo query when executed as a script.
if __name__ == "__main__":
    demo_query = "What are the latest advancements in quantum computing?"
    demo_answer = run_deep_research_system(demo_query)
    print("\nFinal Answer:")
    print(demo_answer)