MusaR committed on
Commit
9349cd3
·
verified ·
1 Parent(s): 04f7b27

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -74
app.py CHANGED
@@ -7,20 +7,18 @@ from sentence_transformers import SentenceTransformer, CrossEncoder
7
  from research_agent.config import AgentConfig
8
  from research_agent.agent import get_clarifying_questions, research_and_plan, write_report_stream
9
 
10
- # --- CSS for styling the Gradio app ---
11
  CSS = """
12
- body, .gradio-container { font-family: 'Inter', sans-serif; }
13
- .gradio-container { max-width: 960px !important; margin: auto !important; }
14
- h1 { text-align: center; font-size: 2.2em; color: #334155; }
15
- .sub-header { text-align: center; color: #64748B; margin-bottom: 20px; }
16
- .gr-button { background-color: #2563EB; color: white; }
17
- .gr-button:hover { background-color: #1E4ED8; }
18
- .accordion { border: 1px solid #E5E7EB !important; border-radius: 8px !important; }
19
- .chat-bubble-container { display: flex; flex-direction: column; gap: 5px; }
20
- .chat-bubble-message { padding: 10px; border-radius: 10px; font-size: 0.95rem; }
21
- .user-message { background-color: #EFF6FF; color: #1E3A8A; align-self: flex-end; border-bottom-right-radius: 2px; }
22
- .bot-message { background-color: #F8FAFC; color: #334155; align-self: flex-start; border: 1px solid #E2E8F0; border-bottom-left-radius: 2px; }
23
- .thinking { color: #64748B; font-style: italic; text-align: center; padding: 10px; }
24
  """
25
 
26
  # --- Model Initialization ---
@@ -38,97 +36,104 @@ def initialize_models(google_key, tavily_key):
38
  planner_model = genai.GenerativeModel(config.WRITER_MODEL)
39
  embedding_model = SentenceTransformer('all-MiniLM-L6-v2', device='cpu')
40
  reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', device='cpu')
41
- print("Models initialized successfully!")
42
  except Exception as e:
43
  raise gr.Error(f"Failed to initialize models. Error: {str(e)}")
44
 
45
  # --- Gradio Application Logic ---
46
- with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as app:
47
- gr.Markdown("# Mini DeepSearch Agent")
48
- gr.Markdown("This agent performs in-depth research using a multi-step AI process.", elem_classes="sub-header")
49
 
50
- with gr.Row():
51
- with gr.Column(scale=1):
52
- with gr.Accordion("API & Settings", open=True, elem_classes="accordion") as settings:
53
- google_api_key_input = gr.Textbox(label="Google API Key", type="password", placeholder="Enter Google AI API Key")
54
- tavily_api_key_input = gr.Textbox(label="Tavily API Key", type="password", placeholder="Enter Tavily Search API Key")
55
- init_button = gr.Button("Initialize Agent")
56
-
57
- with gr.Column(scale=3):
58
- chatbot = gr.Chatbot(
59
- [],
60
- label="Research Agent Conversation",
61
- bubble_full_width=False,
62
- avatar_images=(None, "https://www.gradio.app/images/logo.png"),
63
- elem_id="chatbot"
64
- )
65
- chat_input = gr.Textbox(placeholder="What would you like to research?", interactive=False)
66
 
67
- # --- State Management ---
68
- # agent_state can be: "INITIAL", "CLARIFYING", "GENERATING", "DONE"
 
 
 
 
 
 
 
 
 
 
69
  agent_state = gr.State("INITIAL")
70
  initial_topic_state = gr.State("")
71
 
72
- # --- Event Handlers ---
73
  def handle_initialization(google_key, tavily_key):
74
- initialize_models(google_key, tavily_key)
75
  return {
76
- chat_input: gr.update(interactive=True, placeholder="Models Initialized! What would you like to research?"),
77
- init_button: gr.update(value="Agent Ready!", interactive=False)
 
 
78
  }
79
 
80
- def chat_step(user_input, history, state, original_topic):
81
- history = history or []
82
-
83
- if state == "INITIAL":
 
 
 
 
84
  history.append([user_input, None])
85
- # Show a thinking message
86
- yield {chatbot: history, agent_state: "CLARIFYING", initial_topic_state: user_input, chat_input: gr.update(interactive=False, placeholder="Thinking...")}
87
 
88
- # Get clarifying questions
89
  questions = get_clarifying_questions(planner_model, user_input)
90
  history[-1][1] = "I can do that. To give you the best report, could you answer these questions for me?\n\n" + questions
91
- yield {chatbot: history, chat_input: gr.update(interactive=True, placeholder="Provide your answers to the questions above...")}
92
 
93
- elif state == "CLARIFYING":
 
 
94
  history.append([user_input, None])
95
- yield {chatbot: history, agent_state: "GENERATING", chat_input: gr.update(interactive=False, placeholder="Generating full report...")}
96
-
97
- # Plan the research
98
- status_updates = "### Agent Status\n"
99
- plan = research_and_plan(config, planner_model, tavily_client, original_topic, user_input)
100
-
101
- status_updates += f"**Research Plan:**\n- **Topic:** {plan['detailed_topic']}\n- **Sections:** {[s.title for s in plan['sections']]}\n\n---\n"
102
- history[-1][1] = status_updates
103
- yield {chatbot: history}
104
 
105
- # Generate the report, streaming updates
106
- report_generator = write_report_stream(config, writer_model, tavily_client, embedding_model, reranker, plan)
107
- final_report_md = ""
108
- for update in report_generator:
109
- if isinstance(update, str):
110
- final_report_md = update
111
- status_updates += update
112
- history[-1][1] = status_updates
113
- yield {chatbot: history}
 
 
 
 
 
 
 
 
114
 
115
- # Append the final report as a new message
116
- history.append([None, final_report_md])
117
- yield {chatbot: history, agent_state: "DONE", chat_input: gr.update(interactive=True, placeholder="Research complete. What's next?")}
 
 
118
 
119
  init_button.click(
120
  fn=handle_initialization,
121
  inputs=[google_api_key_input, tavily_api_key_input],
122
- outputs=[chat_input, init_button]
123
  )
124
 
125
  chat_input.submit(
126
  fn=chat_step,
127
- inputs=[chat_input, chatbot, agent_state, initial_topic_state],
128
- outputs=[chatbot, agent_state, initial_topic_state, chat_input]
129
  ).then(
130
- fn=lambda: "",
131
- outputs=[chat_input]
132
  )
133
 
134
  app.launch(debug=True)
 
7
  from research_agent.config import AgentConfig
8
  from research_agent.agent import get_clarifying_questions, research_and_plan, write_report_stream
9
 
10
# --- Improved CSS for a professional chatbot look ---
# Page-level styling injected into gr.Blocks(css=CSS): Inter font on a light
# slate background, an 800px centered column, card shadows on the settings
# accordion and chat bubbles, blue user / white bot bubbles, and the default
# Gradio footer hidden.
CSS = """
body, .gradio-container { font-family: 'Inter', sans-serif; background-color: #F1F5F9; }
.gradio-container { max-width: 800px !important; margin: auto !important; }
h1 { text-align: center; font-weight: 700; font-size: 2.5em; color: #1E293B; }
.sub-header { text-align: center; color: #475569; margin-bottom: 20px; font-size: 1.1em; }
.accordion { border: none !important; box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.1), 0 1px 2px -1px rgba(0, 0, 0, 0.1) !important; }
.chat-window { min-height: 450px; }
.message { box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.1), 0 1px 2px -1px rgba(0, 0, 0, 0.1) !important; }
.message.user { background: #2563EB !important; color: white; border-bottom-right-radius: 2px !important; }
.message.bot { background: #FFFFFF !important; color: #334155; border: 1px solid #E2E8F0; border-bottom-left-radius: 2px !important; }
footer { display: none !important; }
"""
23
 
24
  # --- Model Initialization ---
 
36
  planner_model = genai.GenerativeModel(config.WRITER_MODEL)
37
  embedding_model = SentenceTransformer('all-MiniLM-L6-v2', device='cpu')
38
  reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', device='cpu')
39
+ return "Models initialized successfully!"
40
  except Exception as e:
41
  raise gr.Error(f"Failed to initialize models. Error: {str(e)}")
42
 
43
# --- Gradio Application Logic ---
with gr.Blocks(css=CSS, theme=gr.themes.Soft(primary_hue="blue", secondary_hue="slate")) as app:
    # Page header.
    gr.Markdown("<h1>Mini DeepSearch Agent</h1>")
    gr.Markdown("<p class='sub-header'>Your AI partner for in-depth research and analysis.</p>")

    # API-key entry; handle_initialization collapses this accordion once the
    # models come up.
    with gr.Accordion("API & Settings", open=True, elem_classes="accordion") as settings:
        with gr.Row():
            google_api_key_input = gr.Textbox(label="Google API Key", type="password", placeholder="Enter Google AI API Key", scale=2)
            tavily_api_key_input = gr.Textbox(label="Tavily API Key", type="password", placeholder="Enter Tavily Search API Key", scale=2)
            init_button = gr.Button("Initialize Agent", scale=1, variant="primary")

    # Hidden until initialization succeeds; then shows the status message.
    initialization_status = gr.Markdown(visible=False)

    # Chat widgets start hidden/disabled and are revealed by
    # handle_initialization after the models are ready.
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        label="Research Agent",
        bubble_full_width=False,
        height=500,
        avatar_images=(None, "https://www.gradio.app/images/logo.png"),
        visible=False
    )
    chat_input = gr.Textbox(placeholder="What topic would you like to research?", interactive=False, visible=False)

    # State Management
    # Conversation phase ("INITIAL" -> "CLARIFYING" -> "GENERATING") and the
    # first topic the user typed, kept for the research-planning step.
    agent_state = gr.State("INITIAL")
    initial_topic_state = gr.State("")
70
 
 
71
  def handle_initialization(google_key, tavily_key):
72
+ init_status = initialize_models(google_key, tavily_key)
73
  return {
74
+ initialization_status: gr.update(value=f"**Status:** {init_status}", visible=True),
75
+ chatbot: gr.update(visible=True, value=[(None, "Agent initialized. Please enter your research topic to begin.")]),
76
+ chat_input: gr.update(interactive=True, visible=True),
77
+ settings: gr.update(open=False)
78
  }
79
 
80
    def chat_step(user_input, history):
        """Drive the three-phase conversation: topic -> clarifying Q&A -> report.

        Generator handler for chat_input.submit; yields
        (chat history, chat_input update) pairs so the UI streams progress.
        """
        # NOTE(review): reading a gr.State via `.value` and mutating it with
        # `.update(...)` touches the component object, not the per-session
        # state — Gradio expects state to flow through the event's
        # inputs/outputs lists (as the previous version of this file did).
        # Confirm this behaves correctly with multiple concurrent sessions.
        current_state = agent_state.value
        original_topic = initial_topic_state.value

        if current_state == "INITIAL":
            # 1. User provides the initial topic
            agent_state.update("CLARIFYING")
            initial_topic_state.update(user_input)
            history.append([user_input, None])
            # Lock the input box while the model thinks.
            yield history, gr.update(interactive=False, placeholder="Thinking...")

            questions = get_clarifying_questions(planner_model, user_input)
            history[-1][1] = "I can do that. To give you the best report, could you answer these questions for me?\n\n" + questions
            yield history, gr.update(interactive=True, placeholder="Provide your answers to the questions above...")

        elif current_state == "CLARIFYING":
            # 2. User provides answers to clarifying questions
            agent_state.update("GENERATING")
            history.append([user_input, None])
            yield history, gr.update(interactive=False, placeholder="Generating full report...")

            try:
                # Plan the research, then stream the report generation.
                plan = research_and_plan(config, planner_model, tavily_client, original_topic, user_input)
                report_generator = write_report_stream(config, writer_model, tavily_client, embedding_model, reranker, plan)

                status_updates = ""
                final_report_md = ""
                for update in report_generator:
                    # Skip the completion marker; everything else is streamed
                    # into the last bot bubble as it arrives.
                    if isinstance(update, str) and "Report Generation Complete" not in update:
                        final_report_md = update  # Keep track of the full report text
                        status_updates += update
                        history[-1][1] = status_updates  # Stream thought process
                        yield history, gr.update(interactive=False)

                history.append([None, final_report_md])  # Post the final report
                agent_state.update("INITIAL")  # Reset for next query
                initial_topic_state.update("")
                yield history, gr.update(interactive=True, placeholder="Research complete. What's the next topic?")

            except Exception as e:
                # Surface the failure in chat and reset state so the user can retry.
                history.append([None, f"An error occurred: {str(e)}"])
                agent_state.update("INITIAL")
                initial_topic_state.update("")
                yield history, gr.update(interactive=True, placeholder="Let's try again. What's the topic?")
124
 
125
    # Initialize the models, then reveal the chat UI and collapse settings.
    init_button.click(
        fn=handle_initialization,
        inputs=[google_api_key_input, tavily_api_key_input],
        outputs=[initialization_status, chatbot, chat_input, settings]
    )

    # NOTE(review): chat_step reads/writes agent_state and initial_topic_state
    # directly on the components rather than routing them through these
    # inputs/outputs — verify session state survives across queued events.
    chat_input.submit(
        fn=chat_step,
        inputs=[chat_input, chatbot],
        outputs=[chatbot, chat_input]
    ).then(
        # Clear the textbox immediately after each submission.
        lambda: "", None, chat_input, queue=False
    )

app.launch(debug=True)