Carolzinha2010 committed on
Commit
40373e7
·
verified ·
1 Parent(s): 64c9499

Create app.py

Browse files

# Top-level script setup: debug banner, imports, API-key loading, and constants.
print("Application script started.") # Debugging print statement

import os
import gradio as gr
import requests
import inspect
import pandas as pd

# Import libraries for SerpAPI
from serpapi import GoogleSearch
# Removed google.generativeai import as LLM is not currently usable


# --- Get API Keys from Environment Variables ---
# SERPAPI_API_KEY and GOOGLE_API_KEY should be set as secrets in your Hugging Face Space
SERPAPI_API_KEY = os.getenv('SERPAPI_API_KEY')
# NOTE(review): printing even a prefix of a secret leaks key material into logs —
# consider logging only whether the key is present.
print(f"SERPAPI_API_KEY (first 5 chars): {SERPAPI_API_KEY[:5] if SERPAPI_API_KEY else 'None'}...") # Debugging API key

# Removed GOOGLE_API_KEY as it's not used in this version
# GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
# print(f"GOOGLE_API_KEY (first 5 chars): {GOOGLE_API_KEY[:5] if GOOGLE_API_KEY else 'None'}...") # Debugging API key

# --- Define the default API URL ---
# Base URL for the scoring/challenge service this Space submits answers to.
DEFAULT_API_URL = "https://agent-challenge.hf.space/agent_challenge" # Or the correct API URL if different


# --- Google Generative AI LLM Initialization (Removed) ---
# Kept for reference: the Gemini-based synthesis path was disabled in this version.
# print("Attempting to initialize Google Generative AI model...") # Debugging print before loading
# gemini_model = None # Initialize to None
# if not GOOGLE_API_KEY:
#     print("Warning: GOOGLE_API_KEY environment variable not set. LLM will not be available.")
# else:
#     try:
#         # Configure the generative AI library
#         genai.configure(api_key=GOOGLE_API_KEY)
#         print("Google Generative AI configured.")
#         gemini_model = genai.GenerativeModel('gemini-1.5-flash')
#         print("Gemini model initialized successfully.") # Debugging print after successful init
#     except Exception as e:
#         print(f"An error occurred during Google Generative AI initialization: {e}")
#         gemini_model = None # Ensure model is None if initialization fails

# --- Web Search Function (using SerpAPI) ---
def web_search(query: str) -> list[dict]:
    """Run a Google search through SerpAPI and collect the organic results.

    Args:
        query: The search query string.

    Returns:
        A list of dicts, each with keys 'title', 'url', and 'snippet'.
        An empty list is returned when the API key is missing, no organic
        results are present, or any error occurs during the request.
    """
    print(f"web_search called with query: {query[:50]}...") # Debugging web_search call
    # Bail out early when the Space secret is not configured.
    if not SERPAPI_API_KEY:
        print("SerpAPI key not found in environment variables.")
        return []

    search_params = {
        "q": query,
        "api_key": SERPAPI_API_KEY,
        "engine": "google",  # Use Google search engine
        "num": 5,            # Number of results to fetch
    }
    collected: list[dict] = []

    try:
        raw = GoogleSearch(search_params).get_dict()  # Whole SerpAPI response as a dict
        print(f"SerpAPI raw response keys: {raw.keys() if isinstance(raw, dict) else 'Response is not a dictionary'}") # Debugging response keys

        # Pull out the organic results, tolerating a malformed response.
        organic = raw.get("organic_results") if isinstance(raw, dict) else None
        if isinstance(organic, list):
            print(f"Found {len(organic)} organic results.") # Debugging result count
            for entry in organic:
                # Skip anything that is not a proper result dict.
                if entry is None or not isinstance(entry, dict):
                    print(f"Skipping invalid search result item: {entry}")
                    continue
                collected.append({
                    'title': entry.get('title'),
                    'url': entry.get('link'),
                    'snippet': entry.get('snippet', 'No snippet available'),
                })
        else:
            print(f"No 'organic_results' key found or invalid format in SerpAPI response. Response type: {type(raw)}")
            # Print the whole response if no organic_results are found for debugging
            # print(f"SerpAPI response (no organic results): {raw}")

    except Exception as e:
        print(f"An error occurred during SerpAPI web search: {e}")
        # Ensure an empty list is returned on error
        return []

    print(f"web_search returning {len(collected)} results.") # Debugging return count
    return collected  # Always return a list (empty or with results)


# --- Basic Agent Definition (Modified to remove LLM dependency for now) ---
class BasicAgent:

def __init__(self):
    """Construct the agent; no state to set up since the LLM path was removed."""
    # Construction only announces readiness — the Gemini model check that
    # used to live here was dropped along with the LLM integration.
    print("BasicAgent initialized.") # Debugging print before init


def __call__(self, question: str) -> str:
# Removed global gemini_model declaration as it's not used here
print(f"Agent received question (first 50 chars): {question[:50]}...")

# Simple logic to determine if a web search is needed
question_lower = question.lower()
search_keywords = ["what is", "how to", "where is", "who is", "when did", "define", "explain", "tell me about"]
needs_search = any(keyword in question_lower for keyword in search_keywords) or "?" in question
print(f"Needs search: {needs_search}") # Debugging search decision

# --- Analyze question and refine search query ---
# Simplified search query generation - removed LLM query generation
search_query = question # Default search query is the original question
if needs_search:
print("Analyzing question for keywords and refining search query...")
# Basic keyword extraction: split by common question words and take the rest
parts = question_lower.split("what is", 1)
if len(parts) > 1:
search_query = parts[1].strip()
else:
parts = question_lower.split("how to", 1)
if len(parts) > 1:
search_query = parts[1].strip()
else:
parts = question_lower.split("where is", 1)
if len(parts) > 1:
search_query = parts[1].strip()
else:
parts = question_lower.split("who is", 1)
if len(parts) > 1:
search_query = parts[1].strip()
else:
parts = question_lower.split("when did", 1)
if len(parts) > 1:
search_query = parts[1].strip()
else:
parts = question_lower.split("define", 1)
if len(parts) > 1:
search_query = parts[1].strip()
else:
parts = question_lower.split("explain", 1)
if len(parts) > 1:
search_query = parts[1].strip()
else:
parts = question_lower.split("tell me about", 1)
if len(parts) > 1:
search_query = parts[1].strip()
else:
# If no specific question keyword found, use the whole question
search_query = question_lower.strip()


# Optional: Add quotation marks for multi-word phrases if identified
# This simple approach just uses the extracted part as is.
# A more complex approach would identify multi-word entities (e.g., "New York City")
# and wrap them in quotes.

# Optional: Add contextual terms
# Example: If "musician" or "band" is in the question, add "discography"
if any(word in question_lower for word in ["musician", "band", "artist", "singer"]):
search_query += " discography"
elif any(word in question_lower for word in ["movie", "film", "actor", "actress"]):
search_query += " plot summary"
elif any(word in question_lower for word in ["book", "author", "novel"]):
search_query += " plot summary"


print(f"Final search query used: {search_query}") # Debugging final query

search_results = [] # Initialize search_results to an empty list before the try block
if needs_search:
print(f"Question likely requires search. Searching for: {search_query}")
try:
search_results = web_search(search_query) # Call the web_search function with the generated query
print(f"Received {len(search_results)} search results from web_search.") # Debugging results received
print(f"Type of search_results: {type(search_results)}") # Debugging type of search_results
except Exception as e:
print(f"An error occurred during web search: {e}")
return f"An error occurred during web search: {e}"

# --- Use LLM to process search results if available (Removed LLM Synthesis) ---
# Check that search_results is a list and is not empty
if isinstance(search_results, list) and search_results:
print("Returning basic answer based on search results (LLM not available).")
answer_parts = []

Files changed (1) hide show
  1. app.py +95 -250
app.py CHANGED
@@ -1,17 +1,14 @@
 
 
1
  import os
2
  import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
 
7
- # Import libraries for SerpAPI and Google Generative AI
8
  from serpapi import GoogleSearch
9
- import google.generativeai as genai
10
-
11
- # --- Constants ---
12
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space" # Corrected API URL
13
-
14
- print("Application script started.") # Debugging print statement
15
 
16
 
17
  # --- Get API Keys from Environment Variables ---
@@ -19,37 +16,34 @@ print("Application script started.") # Debugging print statement
19
  SERPAPI_API_KEY = os.getenv('SERPAPI_API_KEY')
20
  print(f"SERPAPI_API_KEY (first 5 chars): {SERPAPI_API_KEY[:5] if SERPAPI_API_KEY else 'None'}...") # Debugging API key
21
 
22
- GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
23
- print(f"GOOGLE_API_KEY (first 5 chars): {GOOGLE_API_KEY[:5] if GOOGLE_API_KEY else 'None'}...") # Debugging API key
24
-
25
 
26
- # --- Google Generative AI LLM Initialization ---
27
- print("Attempting to initialize Google Generative AI model...") # Debugging print before loading
28
 
29
- gemini_model = None # Initialize to None
30
 
31
- if not GOOGLE_API_KEY:
32
- print("Warning: GOOGLE_API_KEY environment variable not set. LLM will not be available.")
33
- else:
34
- try:
35
- # Configure the generative AI library
36
- genai.configure(api_key=GOOGLE_API_KEY)
37
- print("Google Generative AI configured.")
38
-
39
- # Initialize the Generative Model
40
- # Using a fast and efficient model like gemini-1.5-flash
41
- # You can explore other models like 'gemini-1.5-pro' for potentially better results
42
- gemini_model = genai.GenerativeModel('gemini-1.5-flash')
43
- print("Gemini model initialized successfully.") # Debugging print after successful init
44
-
45
- except Exception as e:
46
- print(f"An error occurred during Google Generative AI initialization: {e}")
47
- gemini_model = None # Ensure model is None if initialization fails
48
 
49
 
50
  # --- Web Search Function (using SerpAPI) ---
51
  def web_search(query: str) -> list[dict]:
52
- global gemini_model # Ensure global declaration is first
53
  """
54
  Performs a web search using SerpAPI and returns relevant information.
55
 
@@ -109,22 +103,21 @@ def web_search(query: str) -> list[dict]:
109
  return results # Always return a list (empty or with results)
110
 
111
 
112
- # --- Basic Agent Definition (Updated to use Google LLM) ---
113
  class BasicAgent:
114
 
115
  def __init__(self):
116
  print("BasicAgent initialized.") # Debugging print before init
117
- # Check if LLM is loaded (optional but good practice)
118
- global gemini_model # Access global variable
119
- if gemini_model is None:
120
- print("Warning: Google Generative AI model not successfully loaded before agent initialization.")
121
- # The agent can still perform search but won't use the LLM for synthesis
122
- else:
123
- print("Google Generative AI model found and ready.") # Debugging print after successful init
124
 
125
 
126
  def __call__(self, question: str) -> str:
127
- global gemini_model # Ensure global declaration is first
128
  print(f"Agent received question (first 50 chars): {question[:50]}...")
129
 
130
  # Simple logic to determine if a web search is needed
@@ -134,138 +127,45 @@ class BasicAgent:
134
  print(f"Needs search: {needs_search}") # Debugging search decision
135
 
136
  # --- Analyze question and refine search query ---
 
137
  search_query = question # Default search query is the original question
138
  if needs_search:
139
  print("Analyzing question for keywords and refining search query...")
140
- # A more refined approach: identify potential entities or key phrases
141
- # This is a simplified example; advanced agents might use NLP libraries (spaCy, NLTK)
142
- # or even the LLM itself to extract optimal search terms.
143
-
144
- # Simple approach: Use LLM to generate search query
145
- if gemini_model is not None:
146
- print("Using LLM to generate search query.")
147
- query_prompt = f"""Given the following question, generate the most effective web search query to find information to answer it.
148
- Focus on extracting key entities and concepts. Do not include question words like "what is" or "how to".
149
-
150
- Question: {question}
151
-
152
- Search Query:"""
153
- try:
154
- response = gemini_model.generate_content(query_prompt)
155
- generated_query = response.text.strip()
156
- if generated_query and len(generated_query.split()) > 1: # Ensure it's not empty or just one word
157
- search_query = generated_query
158
- print(f"LLM generated search query: {search_query}")
159
- else:
160
- print(f"LLM generated empty or single-word query: '{generated_query}'. Falling back to basic extraction.")
161
- # Fallback to basic extraction if LLM fails
162
- parts = question_lower.split("what is", 1)
163
- if len(parts) > 1:
164
- search_query = parts[1].strip()
165
- else:
166
- parts = question_lower.split("how to", 1)
167
- if len(parts) > 1:
168
- search_query = parts[1].strip()
169
- else:
170
- parts = question_lower.split("where is", 1)
171
- if len(parts) > 1:
172
- search_query = parts[1].strip()
173
- else:
174
- parts = question_lower.split("who is", 1)
175
- if len(parts) > 1:
176
- search_query = parts[1].strip()
177
- else:
178
- parts = question_lower.split("when did", 1)
179
- if len(parts) > 1:
180
- search_query = parts[1].strip()
181
- else:
182
- parts = question_lower.split("define", 1)
183
- if len(parts) > 1:
184
- search_query = parts[1].strip()
185
- else:
186
- parts = question_lower.split("explain", 1)
187
- if len(parts) > 1:
188
- search_query = parts[1].strip()
189
- else:
190
- parts = question_lower.split("tell me about", 1)
191
- if len(parts) > 1:
192
- search_query = parts[1].strip()
193
- else:
194
- search_query = question_lower.strip() # Fallback to whole question
195
-
196
-
197
- except Exception as llm_e:
198
- print(f"An error occurred during LLM search query generation: {llm_e}. Falling back to basic extraction.")
199
- # Fallback to basic extraction if LLM call fails
200
- parts = question_lower.split("what is", 1)
201
- if len(parts) > 1:
202
- search_query = parts[1].strip()
203
- else:
204
- parts = question_lower.split("how to", 1)
205
- if len(parts) > 1:
206
- search_query = parts[1].strip()
207
- else:
208
- parts = question_lower.split("where is", 1)
209
- if len(parts) > 1:
210
- search_query = parts[1].strip()
211
- else:
212
- parts = question_lower.split("who is", 1)
213
- if len(parts) > 1:
214
- search_query = parts[1].strip()
215
- else:
216
- parts = question_lower.split("when did", 1)
217
- if len(parts) > 1:
218
- search_query = parts[1].strip()
219
- else:
220
- parts = question_lower.split("define", 1)
221
- if len(parts) > 1:
222
- search_query = parts[1].strip()
223
- else:
224
- parts = question_lower.split("explain", 1)
225
- if len(parts) > 1:
226
- search_query = parts[1].strip()
227
- else:
228
- parts = question_lower.split("tell me about", 1)
229
- if len(parts) > 1:
230
- search_query = parts[1].strip()
231
- else:
232
- search_query = question_lower.strip() # Fallback to whole question
233
- else: # LLM not available
234
- print("LLM not available. Using basic search query extraction.")
235
- # Fallback to basic extraction if LLM is not initialized
236
- parts = question_lower.split("what is", 1)
237
- if len(parts) > 1:
238
- search_query = parts[1].strip()
239
- else:
240
- parts = question_lower.split("how to", 1)
241
  if len(parts) > 1:
242
  search_query = parts[1].strip()
243
  else:
244
- parts = question_lower.split("where is", 1)
245
- if len(parts) > 1:
246
- search_query = parts[1].strip()
247
- else:
248
- parts = question_lower.split("who is", 1)
249
  if len(parts) > 1:
250
  search_query = parts[1].strip()
251
  else:
252
- parts = question_lower.split("when did", 1)
253
  if len(parts) > 1:
254
  search_query = parts[1].strip()
255
  else:
256
- parts = question_lower.split("define", 1)
257
  if len(parts) > 1:
258
  search_query = parts[1].strip()
259
  else:
260
- parts = question_lower.split("explain", 1)
261
  if len(parts) > 1:
262
  search_query = parts[1].strip()
263
  else:
264
- parts = question_lower.split("tell me about", 1)
265
- if len(parts) > 1:
266
- search_query = parts[1].strip()
267
- else:
268
- search_query = question_lower.strip() # Fallback to whole question
269
 
270
 
271
  # Optional: Add quotation marks for multi-word phrases if identified
@@ -285,106 +185,52 @@ Search Query:"""
285
 
286
  print(f"Final search query used: {search_query}") # Debugging final query
287
 
288
-
289
  if needs_search:
290
  print(f"Question likely requires search. Searching for: {search_query}")
291
- search_results = web_search(search_query) # Call the web_search function with the generated query
292
- print(f"Received {len(search_results)} search results from web_search.") # Debugging results received
293
- print(f"Type of search_results: {type(search_results)}") # Debugging type of search_results
294
-
295
-
296
- # --- Use LLM to process search results if available ---
297
- # Add check that search_results is a list before proceeding
298
- if isinstance(search_results, list) and search_results and gemini_model is not None:
299
- print("Using Google LLM to process search results.") # Debugging print before LLM call
300
-
301
- # Format search results for the LLM
302
- context = ""
303
- for i, result in enumerate(search_results[:5]): # Use top 5 results for context
304
- # Add check for None or non-dict result item before accessing keys
305
- if result is None or not isinstance(result, dict):
306
- print(f"Skipping invalid result at index {i} in LLM context formatting: {result}")
307
- continue
308
- context += f"Source {i+1}:\n"
309
- if result.get('title'):
310
- context += f"Title: {result['title']}\n"
311
- if result.get('snippet'):
312
- context += f"Snippet: {result['snippet']}\n"
313
- if result.get('url'):
314
- context += f"URL: {result['url']}\n"
315
- context += "---\n" # Separator
316
-
317
- # Create a prompt for the LLM
318
- prompt = f"""Using the following search results, answer the question accurately.
319
- If the search results do not contain enough information to answer the question,
320
- respond with "I couldn't find enough information in the search results."
321
-
322
- Question: {question}
323
-
324
- Search Results:
325
- {context}
326
-
327
- Answer:"""
328
-
329
- print(f"LLM Prompt (first 500 chars):\n{prompt[:500]}...") # Debugging prompt
330
-
331
- try:
332
- # Generate content using the Gemini model
333
- response = gemini_model.generate_content(prompt)
334
- generated_text = response.text # Get the generated text
335
-
336
- # Extract only the answer part from the generated text if necessary
337
- # Depending on the prompt and model, the model might repeat the prompt.
338
- # A simple way is to look for the "Answer:" tag.
339
- answer_tag = "Answer:"
340
- if answer_tag in generated_text:
341
- llm_answer = generated_text.split(answer_tag, 1)[1].strip()
342
- else:
343
- llm_answer = generated_text.strip() # Fallback if tag not found
344
-
345
-
346
- print(f"LLM generated text (first 100 chars): {generated_text[:100]}...") # Debugging raw output
347
- print(f"Agent returning LLM-based answer (first 100 chars): {llm_answer[:100]}...") # Debugging final answer
348
-
349
- if llm_answer:
350
- return llm_answer
351
- else:
352
- # Fallback if LLM generates empty response
353
- print("LLM generated an empty response.")
354
- return "I couldn't generate an answer based on the search results."
355
-
356
- except Exception as llm_e:
357
- print(f"An error occurred during LLM generation: {llm_e}")
358
- return f"An error occurred while processing search results with the LLM: {llm_e}"
359
-
360
- # Fallback if search results are empty OR LLM is None (due to initialization error)
361
- elif isinstance(search_results, list) and search_results: # Search results exist and is a list, but LLM is not available
362
- print("Google Generative AI model not loaded. Cannot use LLM.")
363
- # Return the old style answer if LLM is not available, but only if search results exist
364
- print("Returning basic answer based on search results (LLM not available).")
365
- answer_parts = []
366
- for i, result in enumerate(search_results[:3]):
367
- # Add check for None or non-dict result item before accessing keys
368
- if result is None or not isinstance(result, dict):
369
- print(f"Skipping invalid result at index {i} in basic answer formatting: {result}")
370
- continue
371
- if result.get('snippet'):
372
- answer_parts.append(f"Snippet {i+1}: {result['snippet']}")
373
- elif result.get('title'):
374
- answer_parts.append(f"Result {i+1} Title: {result['title']}")
375
- if answer_parts:
376
- return "Based on web search (LLM not available):\n" + "\n".join(answer_parts)
377
- else:
378
  return "I couldn't find useful information in the search results (LLM not available)."
379
  else: # search_results is None or not a list, or empty
380
  print(f"Web search returned no results or results in invalid format. Type: {type(search_results)}")
381
  return "I couldn't find any relevant information on the web for your question."
382
 
383
- else:
384
- # If no search is needed, return a default or simple response
385
- print("Question does not appear to require search. Returning fixed answer.")
386
- fixed_answer = "How can I help you?"
387
- return fixed_answer
 
 
 
 
 
 
388
 
389
 
390
  def run_and_submit_all( profile: gr.OAuthProfile | None, other_arg=None): # Modified to accept 2 arguments
@@ -410,7 +256,6 @@ def run_and_submit_all( profile: gr.OAuthProfile | None, other_arg=None): # Modi
410
  # 1. Instantiate Agent ( modify this part to create your agent)
411
  print("Attempting to instantiate BasicAgent...") # Debugging print before instantiation
412
  try:
413
- # The error occurs when trying to *call* this instantiated object
414
  agent = BasicAgent()
415
  print("BasicAgent instantiated successfully.") # Debugging print after instantiation
416
  except Exception as e:
@@ -462,7 +307,6 @@ def run_and_submit_all( profile: gr.OAuthProfile | None, other_arg=None): # Modi
462
  continue
463
  print(f"Processing Task ID: {task_id}") # Debugging task ID
464
  try:
465
- # Error occurs here: 'BasicAgent' object is not callable
466
  submitted_answer = agent(question_text)
467
  print(f"Agent returned answer for {task_id}: {submitted_answer[:50]}...") # Debugging returned answer
468
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
@@ -563,4 +407,5 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Basic Agent Evaluation Runner") as
563
  )
564
 
565
  # Ensure the app launches when the script is run
566
- demo.launch(server_name="0.0.0.0") # Ensure binding to all interfaces
 
 
1
+ print("Application script started.") # Debugging print statement
2
+
3
  import os
4
  import gradio as gr
5
  import requests
6
  import inspect
7
  import pandas as pd
8
 
9
+ # Import libraries for SerpAPI
10
  from serpapi import GoogleSearch
11
+ # Removed google.generativeai import as LLM is not currently usable
 
 
 
 
 
12
 
13
 
14
  # --- Get API Keys from Environment Variables ---
 
16
  SERPAPI_API_KEY = os.getenv('SERPAPI_API_KEY')
17
  print(f"SERPAPI_API_KEY (first 5 chars): {SERPAPI_API_KEY[:5] if SERPAPI_API_KEY else 'None'}...") # Debugging API key
18
 
19
+ # Removed GOOGLE_API_KEY as it's not used in this version
20
+ # GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
21
+ # print(f"GOOGLE_API_KEY (first 5 chars): {GOOGLE_API_KEY[:5] if GOOGLE_API_KEY else 'None'}...") # Debugging API key
22
 
23
+ # --- Define the default API URL ---
24
+ DEFAULT_API_URL = "https://agent-challenge.hf.space/agent_challenge" # Or the correct API URL if different
25
 
 
26
 
27
+ # --- Google Generative AI LLM Initialization (Removed) ---
28
+ # print("Attempting to initialize Google Generative AI model...") # Debugging print before loading
29
+ # gemini_model = None # Initialize to None
30
+ # if not GOOGLE_API_KEY:
31
+ # print("Warning: GOOGLE_API_KEY environment variable not set. LLM will not be available.")
32
+ # else:
33
+ # try:
34
+ # # Configure the generative AI library
35
+ # genai.configure(api_key=GOOGLE_API_KEY)
36
+ # print("Google Generative AI configured.")
37
+ # gemini_model = genai.GenerativeModel('gemini-1.5-flash')
38
+ # print("Gemini model initialized successfully.") # Debugging print after successful init
39
+ # except Exception as e:
40
+ # print(f"An error occurred during Google Generative AI initialization: {e}")
41
+ # gemini_model = None # Ensure model is None if initialization fails
 
 
42
 
43
 
44
  # --- Web Search Function (using SerpAPI) ---
45
  def web_search(query: str) -> list[dict]:
46
+ # Removed global gemini_model declaration as it's not used here
47
  """
48
  Performs a web search using SerpAPI and returns relevant information.
49
 
 
103
  return results # Always return a list (empty or with results)
104
 
105
 
106
+ # --- Basic Agent Definition (Modified to remove LLM dependency for now) ---
107
  class BasicAgent:
108
 
109
  def __init__(self):
110
  print("BasicAgent initialized.") # Debugging print before init
111
+ # Removed LLM check as it's not used here
112
+ # global gemini_model # Access global variable
113
+ # if gemini_model is None:
114
+ # print("Warning: Google Generative AI model not successfully loaded before agent initialization.")
115
+ # else:
116
+ # print("Google Generative AI model found and ready.") # Debugging print after successful init
 
117
 
118
 
119
  def __call__(self, question: str) -> str:
120
+ # Removed global gemini_model declaration as it's not used here
121
  print(f"Agent received question (first 50 chars): {question[:50]}...")
122
 
123
  # Simple logic to determine if a web search is needed
 
127
  print(f"Needs search: {needs_search}") # Debugging search decision
128
 
129
  # --- Analyze question and refine search query ---
130
+ # Simplified search query generation - removed LLM query generation
131
  search_query = question # Default search query is the original question
132
  if needs_search:
133
  print("Analyzing question for keywords and refining search query...")
134
+ # Basic keyword extraction: split by common question words and take the rest
135
+ parts = question_lower.split("what is", 1)
136
+ if len(parts) > 1:
137
+ search_query = parts[1].strip()
138
+ else:
139
+ parts = question_lower.split("how to", 1)
140
+ if len(parts) > 1:
141
+ search_query = parts[1].strip()
142
+ else:
143
+ parts = question_lower.split("where is", 1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  if len(parts) > 1:
145
  search_query = parts[1].strip()
146
  else:
147
+ parts = question_lower.split("who is", 1)
148
+ if len(parts) > 1:
149
+ search_query = parts[1].strip()
150
+ else:
151
+ parts = question_lower.split("when did", 1)
152
  if len(parts) > 1:
153
  search_query = parts[1].strip()
154
  else:
155
+ parts = question_lower.split("define", 1)
156
  if len(parts) > 1:
157
  search_query = parts[1].strip()
158
  else:
159
+ parts = question_lower.split("explain", 1)
160
  if len(parts) > 1:
161
  search_query = parts[1].strip()
162
  else:
163
+ parts = question_lower.split("tell me about", 1)
164
  if len(parts) > 1:
165
  search_query = parts[1].strip()
166
  else:
167
+ # If no specific question keyword found, use the whole question
168
+ search_query = question_lower.strip()
 
 
 
169
 
170
 
171
  # Optional: Add quotation marks for multi-word phrases if identified
 
185
 
186
  print(f"Final search query used: {search_query}") # Debugging final query
187
 
188
+ search_results = [] # Initialize search_results to an empty list before the try block
189
  if needs_search:
190
  print(f"Question likely requires search. Searching for: {search_query}")
191
+ try:
192
+ search_results = web_search(search_query) # Call the web_search function with the generated query
193
+ print(f"Received {len(search_results)} search results from web_search.") # Debugging results received
194
+ print(f"Type of search_results: {type(search_results)}") # Debugging type of search_results
195
+ except Exception as e:
196
+ print(f"An error occurred during web search: {e}")
197
+ return f"An error occurred during web search: {e}"
198
+
199
+ # --- Use LLM to process search results if available (Removed LLM Synthesis) ---
200
+ # Check that search_results is a list and is not empty
201
+ if isinstance(search_results, list) and search_results:
202
+ print("Returning basic answer based on search results (LLM not available).")
203
+ answer_parts = []
204
+ for i, result in enumerate(search_results[:3]):
205
+ # Add check for None or non-dict result item before accessing keys
206
+ if result is None or not isinstance(result, dict):
207
+ print(f"Skipping invalid result at index {i} in basic answer formatting: {result}")
208
+ continue
209
+ if result.get('snippet'):
210
+ answer_parts.append(f"Snippet {i+1}: {result['snippet']}")
211
+ elif result.get('title'):
212
+ answer_parts.append(f"Result {i+1} Title: {result['title']}")
213
+ if answer_parts:
214
+ return "Based on web search (LLM not available):\n" + "\n".join(answer_parts)
215
+ else:
216
+ # Fallback if no useful snippets/titles found in search results
217
+ print("No useful snippets/titles found in search results.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218
  return "I couldn't find useful information in the search results (LLM not available)."
219
  else: # search_results is None or not a list, or empty
220
  print(f"Web search returned no results or results in invalid format. Type: {type(search_results)}")
221
  return "I couldn't find any relevant information on the web for your question."
222
 
223
+ else: # needs_search is True but no search results were returned
224
+ # This else block should ideally not be reached if needs_search is True and web_search is called
225
+ print("Question required search, but no search was performed or it failed.")
226
+ return "I couldn't perform a web search for your question."
227
+
228
+
229
+ else:
230
+ # If no search is needed, return a default or simple response
231
+ print("Question does not appear to require search. Returning fixed answer.")
232
+ fixed_answer = "How can I help you?"
233
+ return fixed_answer
234
 
235
 
236
  def run_and_submit_all( profile: gr.OAuthProfile | None, other_arg=None): # Modified to accept 2 arguments
 
256
  # 1. Instantiate Agent ( modify this part to create your agent)
257
  print("Attempting to instantiate BasicAgent...") # Debugging print before instantiation
258
  try:
 
259
  agent = BasicAgent()
260
  print("BasicAgent instantiated successfully.") # Debugging print after instantiation
261
  except Exception as e:
 
307
  continue
308
  print(f"Processing Task ID: {task_id}") # Debugging task ID
309
  try:
 
310
  submitted_answer = agent(question_text)
311
  print(f"Agent returned answer for {task_id}: {submitted_answer[:50]}...") # Debugging returned answer
312
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
 
407
  )
408
 
409
  # Ensure the app launches when the script is run
410
+ if __name__ == "__main__":
411
+ demo.launch(server_name="0.0.0.0") # Ensure binding to all interfaces