MusaR committed on
Commit
1518c87
·
verified ·
1 Parent(s): 4c8af35

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -47
app.py CHANGED
@@ -7,14 +7,20 @@ from sentence_transformers import SentenceTransformer, CrossEncoder
7
  from research_agent.config import AgentConfig
8
  from research_agent.agent import get_clarifying_questions, research_and_plan, write_report_stream
9
 
10
- # --- CSS for a professional, ChatGPT-inspired look ---
 
 
 
 
 
 
 
 
11
  CSS = """
12
  body, .gradio-container { font-family: 'Inter', sans-serif; background-color: #343541; color: #ECECEC; }
13
  .gradio-container { max-width: 800px !important; margin: auto !important; padding-top: 2rem !important;}
14
  h1 { text-align: center; font-weight: 700; font-size: 2.5em; color: white; }
15
  .sub-header { text-align: center; color: #C5C5D2; margin-bottom: 2rem; font-size: 1.1em; }
16
- .accordion { background-color: #40414F; border: 1px solid #565869 !important; border-radius: 8px !important; }
17
- .accordion .gr-button { background-color: #4B4C5A; color: white; }
18
  #chatbot { box-shadow: none !important; border: none !important; background-color: transparent !important; }
19
  .message-bubble { background: #40414F !important; border: 1px solid #565869 !important; color: #ECECEC !important;}
20
  .message-bubble.user { background: #343541 !important; border: none !important; }
@@ -29,14 +35,15 @@ footer { display: none !important; }
29
 
30
  # --- Model Initialization ---
31
  config = AgentConfig()
 
32
  writer_model, planner_model, embedding_model, reranker, tavily_client = None, None, None, None, None
33
- IS_PROCESSING = False # Add a lock to prevent concurrent runs
34
 
35
- def initialize_models(google_key, tavily_key):
 
36
  global writer_model, planner_model, embedding_model, reranker, tavily_client, IS_PROCESSING
37
- if not google_key or not tavily_key:
38
- raise gr.Error("API keys are required.")
39
  try:
 
40
  genai.configure(api_key=google_key)
41
  tavily_client = TavilyClient(api_key=tavily_key)
42
  writer_model = genai.GenerativeModel(config.WRITER_MODEL)
@@ -44,50 +51,54 @@ def initialize_models(google_key, tavily_key):
44
  embedding_model = SentenceTransformer('all-MiniLM-L6-v2', device='cpu')
45
  reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', device='cpu')
46
  except Exception as e:
47
- raise gr.Error(f"Failed to initialize models. Error: {str(e)}")
48
- IS_PROCESSING = False # Ensure lock is free on initialization
 
 
 
 
 
 
 
49
 
50
  # --- Gradio Application Logic ---
51
  with gr.Blocks(css=CSS, theme=gr.themes.Base()) as app:
52
  gr.Markdown("<h1>Mini DeepSearch Agent</h1>")
53
  gr.Markdown("<p class='sub-header'>Your AI partner for in-depth research and analysis.</p>")
54
 
55
- agent_state = gr.State("INITIAL")
56
  initial_topic_state = gr.State("")
57
 
58
- with gr.Accordion("API & Settings", open=True, elem_classes="accordion") as settings_accordion:
59
- with gr.Row():
60
- google_api_key_input = gr.Textbox(label="Google API Key", type="password", placeholder="Enter Google AI API Key", scale=2)
61
- tavily_api_key_input = gr.Textbox(label="Tavily API Key", type="password", placeholder="Enter Tavily Search API Key", scale=2)
62
- init_button = gr.Button("Initialize Agent", scale=1)
63
-
64
  chatbot = gr.Chatbot(
65
  elem_id="chatbot",
66
  bubble_full_width=False,
67
  height=500,
68
- visible=False
 
 
69
  )
70
-
71
  with gr.Row(elem_id="chat-input-container"):
72
- chat_input = gr.Textbox(placeholder="What would you like to research?", interactive=False, visible=False, show_label=False, scale=8)
73
- submit_button = gr.Button("Submit", elem_id="submit-button", visible=False, scale=1)
74
-
75
- def handle_initialization(google_key, tavily_key):
76
- initialize_models(google_key, tavily_key)
77
- return {
78
- chatbot: gr.update(visible=True, value=[(None, "Agent initialized. Please enter your research topic to begin.")]),
79
- chat_input: gr.update(interactive=True, visible=True),
80
- submit_button: gr.update(visible=True),
81
- settings_accordion: gr.update(open=False)
82
- }
83
 
84
  def chat_step_wrapper(user_input, history, current_agent_state, topic_state):
85
  """A wrapper to manage the processing lock."""
86
  global IS_PROCESSING
87
  if IS_PROCESSING:
88
  print("Ignoring duplicate request while processing.")
89
- if False: # This makes the function a generator
90
- yield
91
  return
92
 
93
  IS_PROCESSING = True
@@ -107,7 +118,7 @@ with gr.Blocks(css=CSS, theme=gr.themes.Base()) as app:
107
  def chat_step(user_input, history, current_agent_state, topic_state):
108
  history = history or []
109
  history.append((user_input, None))
110
-
111
  if current_agent_state == "INITIAL":
112
  yield history, "CLARIFYING", user_input, gr.update(interactive=False, placeholder="Thinking...")
113
  questions = get_clarifying_questions(planner_model, user_input)
@@ -118,35 +129,26 @@ with gr.Blocks(css=CSS, theme=gr.themes.Base()) as app:
118
  thinking_message = "Got it. Generating your full research report. This will take a moment..."
119
  history[-1] = (user_input, thinking_message)
120
  yield history, "GENERATING", topic_state, gr.update(interactive=False, placeholder="Generating...")
121
-
122
  try:
123
  plan = research_and_plan(config, planner_model, tavily_client, topic_state, user_input)
124
  report_generator = write_report_stream(config, writer_model, tavily_client, embedding_model, reranker, plan)
125
-
126
  stream_content = ""
127
  for update in report_generator:
128
  stream_content = update
129
  history[-1] = (user_input, stream_content)
130
  yield history, "GENERATING", topic_state, gr.update(interactive=False)
131
-
132
  yield history, "INITIAL", "", gr.update(interactive=True, placeholder="Research complete. What's the next topic?")
133
-
134
  except Exception as e:
135
  error_message = f"An error occurred: {str(e)}"
136
  history.append((None, error_message))
137
  yield history, "INITIAL", "", gr.update(interactive=True, placeholder="Let's try again. What's the topic?")
138
 
139
  # --- Event Listeners ---
140
- # This section is rewritten to prevent duplicate triggers.
141
-
142
- init_button.click(
143
- fn=handle_initialization,
144
- inputs=[google_api_key_input, tavily_api_key_input],
145
- outputs=[chatbot, chat_input, submit_button, settings_accordion]
146
- )
147
-
148
- # We define a single submission event and trigger it from both the button and the textbox.
149
- # It now calls the wrapper function to handle the processing lock.
150
  submit_event = submit_button.click(
151
  fn=chat_step_wrapper,
152
  inputs=[chat_input, chatbot, agent_state, initial_topic_state],
@@ -169,4 +171,4 @@ with gr.Blocks(css=CSS, theme=gr.themes.Base()) as app:
169
  queue=False
170
  )
171
 
172
- app.launch(debug=True)
 
7
  from research_agent.config import AgentConfig
8
  from research_agent.agent import get_clarifying_questions, research_and_plan, write_report_stream
9
 
10
+
11
+ google_key = os.getenv("GOOGLE_API_KEY")
12
+ tavily_key = os.getenv("TAVILY_API_KEY")
13
+
14
+ if not google_key or not tavily_key:
15
+ raise ValueError("API keys not found.")
16
+
17
+
18
+
19
  CSS = """
20
  body, .gradio-container { font-family: 'Inter', sans-serif; background-color: #343541; color: #ECECEC; }
21
  .gradio-container { max-width: 800px !important; margin: auto !important; padding-top: 2rem !important;}
22
  h1 { text-align: center; font-weight: 700; font-size: 2.5em; color: white; }
23
  .sub-header { text-align: center; color: #C5C5D2; margin-bottom: 2rem; font-size: 1.1em; }
 
 
24
  #chatbot { box-shadow: none !important; border: none !important; background-color: transparent !important; }
25
  .message-bubble { background: #40414F !important; border: 1px solid #565869 !important; color: #ECECEC !important;}
26
  .message-bubble.user { background: #343541 !important; border: none !important; }
 
35
 
36
  # --- Model Initialization ---
37
  config = AgentConfig()
38
+
39
  writer_model, planner_model, embedding_model, reranker, tavily_client = None, None, None, None, None
40
+ IS_PROCESSING = False
41
 
42
+ def initialize_models():
43
+ """Initializes all the models and clients using keys from environment variables."""
44
  global writer_model, planner_model, embedding_model, reranker, tavily_client, IS_PROCESSING
 
 
45
  try:
46
+
47
  genai.configure(api_key=google_key)
48
  tavily_client = TavilyClient(api_key=tavily_key)
49
  writer_model = genai.GenerativeModel(config.WRITER_MODEL)
 
51
  embedding_model = SentenceTransformer('all-MiniLM-L6-v2', device='cpu')
52
  reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', device='cpu')
53
  except Exception as e:
54
+ # This error will be displayed in the Hugging Face logs
55
+ print(f"FATAL: Failed to initialize models. Error: {str(e)}")
56
+ # Raise an exception to stop the app from running incorrectly
57
+ raise gr.Error(f"Failed to initialize models. Please check the logs. Error: {str(e)}")
58
+ IS_PROCESSING = False
59
+ print("Models and clients initialized successfully.")
60
+
61
+ # --- 3. Initialize models on application startup ---
62
+ initialize_models()
63
 
64
  # --- Gradio Application Logic ---
65
  with gr.Blocks(css=CSS, theme=gr.themes.Base()) as app:
66
  gr.Markdown("<h1>Mini DeepSearch Agent</h1>")
67
  gr.Markdown("<p class='sub-header'>Your AI partner for in-depth research and analysis.</p>")
68
 
69
+ agent_state = gr.State("INITIAL")
70
  initial_topic_state = gr.State("")
71
 
72
+
 
 
 
 
 
73
  chatbot = gr.Chatbot(
74
  elem_id="chatbot",
75
  bubble_full_width=False,
76
  height=500,
77
+ # --- 5. Make the chatbot visible from the start ---
78
+ visible=True,
79
+ value=[(None, "Agent is ready. What would you like to research?")]
80
  )
81
+
82
  with gr.Row(elem_id="chat-input-container"):
83
+ chat_input = gr.Textbox(
84
+ placeholder="What would you like to research?",
85
+ # --- 6. Make the input box interactive and visible from the start ---
86
+ interactive=True,
87
+ visible=True,
88
+ show_label=False,
89
+ scale=8
90
+ )
91
+ submit_button = gr.Button("Submit", elem_id="submit-button", visible=True, scale=1)
92
+
93
+ # The handle_initialization function is no longer needed.
94
 
95
  def chat_step_wrapper(user_input, history, current_agent_state, topic_state):
96
  """A wrapper to manage the processing lock."""
97
  global IS_PROCESSING
98
  if IS_PROCESSING:
99
  print("Ignoring duplicate request while processing.")
100
+ # This is a generator, so we must yield something. An empty yield is fine.
101
+ if False: yield
102
  return
103
 
104
  IS_PROCESSING = True
 
118
  def chat_step(user_input, history, current_agent_state, topic_state):
119
  history = history or []
120
  history.append((user_input, None))
121
+
122
  if current_agent_state == "INITIAL":
123
  yield history, "CLARIFYING", user_input, gr.update(interactive=False, placeholder="Thinking...")
124
  questions = get_clarifying_questions(planner_model, user_input)
 
129
  thinking_message = "Got it. Generating your full research report. This will take a moment..."
130
  history[-1] = (user_input, thinking_message)
131
  yield history, "GENERATING", topic_state, gr.update(interactive=False, placeholder="Generating...")
132
+
133
  try:
134
  plan = research_and_plan(config, planner_model, tavily_client, topic_state, user_input)
135
  report_generator = write_report_stream(config, writer_model, tavily_client, embedding_model, reranker, plan)
136
+
137
  stream_content = ""
138
  for update in report_generator:
139
  stream_content = update
140
  history[-1] = (user_input, stream_content)
141
  yield history, "GENERATING", topic_state, gr.update(interactive=False)
142
+
143
  yield history, "INITIAL", "", gr.update(interactive=True, placeholder="Research complete. What's the next topic?")
144
+
145
  except Exception as e:
146
  error_message = f"An error occurred: {str(e)}"
147
  history.append((None, error_message))
148
  yield history, "INITIAL", "", gr.update(interactive=True, placeholder="Let's try again. What's the topic?")
149
 
150
  # --- Event Listeners ---
151
+
 
 
 
 
 
 
 
 
 
152
  submit_event = submit_button.click(
153
  fn=chat_step_wrapper,
154
  inputs=[chat_input, chatbot, agent_state, initial_topic_state],
 
171
  queue=False
172
  )
173
 
174
+ app.launch(debug=True)