gabejavitt committed on
Commit
7d1723e
·
verified ·
1 Parent(s): 9e0417b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +728 -318
app.py CHANGED
@@ -4,7 +4,10 @@ import json
4
  import re
5
  import traceback
6
  import contextlib
7
- from typing import List, Optional
 
 
 
8
  from pathlib import Path
9
 
10
  import gradio as gr
@@ -20,11 +23,11 @@ import requests
20
 
21
  # LangChain & LangGraph
22
  from langgraph.graph.message import add_messages
23
- from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
 
24
  from langgraph.prebuilt import ToolNode
25
  from langgraph.graph import START, END, StateGraph
26
- from langchain_core.tools import tool
27
- from langchain_groq import ChatGroq # <-- Groq integration
28
 
29
  # RAG
30
  from langchain_text_splitters import RecursiveCharacterTextSplitter
@@ -35,264 +38,223 @@ from langchain_community.tools import DuckDuckGoSearchRun
35
  # =============================================================================
36
  # CONFIGURATION
37
  # =============================================================================
38
- class Config:
39
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
40
- MAX_TURNS = 20
41
- MAX_MESSAGE_LENGTH = 8000
42
- GROQ_MODEL = "llama-3.3-70b-versatile" # Groq's Llama 70B model
43
- ASR_MODEL = "openai/whisper-base"
44
- EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
45
- CHUNK_SIZE = 1000
46
- CHUNK_OVERLAP = 200
47
 
48
  # =============================================================================
49
  # ASR INITIALIZATION
50
  # =============================================================================
51
- class ASRManager:
52
- """Manages the Automatic Speech Recognition pipeline."""
53
-
54
- def __init__(self):
55
- self.pipeline = None
56
- self._initialize()
57
-
58
- def _initialize(self):
59
- """Initialize the ASR pipeline with proper device handling."""
60
- try:
61
- print("Initializing ASR (Whisper) pipeline...")
62
- device = 0 if torch.cuda.is_available() else -1
63
- device_name = "cuda:0" if device == 0 else "cpu"
64
- print(f"Using device: {device_name}")
65
-
66
- self.pipeline = pipeline(
67
- "automatic-speech-recognition",
68
- model=Config.ASR_MODEL,
69
- torch_dtype=torch.float16 if device == 0 else torch.float32,
70
- device=device
71
- )
72
- print("✅ ASR pipeline loaded successfully")
73
- except Exception as e:
74
- print(f"⚠️ Failed to load ASR pipeline: {e}")
75
- self.pipeline = None
76
-
77
- def transcribe(self, file_path: str) -> str:
78
- """Transcribe an audio file."""
79
- if self.pipeline is None:
80
- raise RuntimeError("ASR pipeline not available")
81
-
82
- result = self.pipeline(file_path)
83
- return result.get("text", "")
84
-
85
- # Global ASR manager
86
- asr_manager = ASRManager()
87
 
88
  # =============================================================================
89
  # UTILITY FUNCTIONS
90
  # =============================================================================
91
- class FileUtils:
92
- """Utilities for file operations."""
93
-
94
- @staticmethod
95
- def find_file(path: str) -> Optional[Path]:
96
- """Find a file by trying multiple path variations."""
97
- script_dir = Path.cwd()
98
- safe_path = Path(path).as_posix()
99
-
100
- paths_to_try = [
101
- script_dir / safe_path,
102
- Path(safe_path),
103
- script_dir / Path(path).name
104
- ]
105
-
106
- for attempt_path in paths_to_try:
107
- if attempt_path.exists():
108
- return attempt_path
109
-
110
- return None
111
-
112
- @staticmethod
113
- def truncate_if_needed(content: str, max_length: int = Config.MAX_MESSAGE_LENGTH) -> str:
114
- """Truncate content if it exceeds max length."""
115
- if len(content) > max_length:
116
- return content[:max_length] + f"\n...[truncated, {len(content)} total chars]"
117
- return content
118
-
119
- class SecurityValidator:
120
- """Validates code for security concerns."""
121
-
122
- DANGEROUS_PATTERNS = ['__import__', 'eval(', 'compile(', 'subprocess', 'os.system']
123
- WRITE_MODES = ["'w'", '"w"', "'a'", '"a"', "'wb'", '"wb"']
124
-
125
- @classmethod
126
- def validate_code(cls, code: str) -> Optional[str]:
127
- """
128
- Validate code for security issues.
129
- Returns error message if dangerous, None if safe.
130
- """
131
- code_lower = code.lower()
132
-
133
- # Check for dangerous operations
134
- for pattern in cls.DANGEROUS_PATTERNS:
135
- if pattern in code_lower:
136
- return f"Potentially dangerous operation '{pattern}' is not allowed"
137
-
138
- # Check for file writing
139
- if 'open(' in code_lower and any(mode in code for mode in cls.WRITE_MODES):
140
- return "Writing files not allowed in code_interpreter. Use write_file tool"
141
-
142
- return None
143
 
144
  # =============================================================================
145
  # TOOL DEFINITIONS
146
  # =============================================================================
147
 
148
- # --- Search Tool ---
149
  class SearchInput(BaseModel):
150
  query: str = Field(description="The search query.")
151
 
152
  @tool(args_schema=SearchInput)
153
  def search_tool(query: str) -> str:
154
- """Search the web using DuckDuckGo for recent information."""
155
- if not query or not isinstance(query, str):
156
- return "Error: 'query' must be a non-empty string"
157
 
158
- print(f"🔍 Searching: {query}")
159
  try:
160
  search = DuckDuckGoSearchRun()
161
  result = search.run(query)
162
- return FileUtils.truncate_if_needed(result)
 
 
163
  except Exception as e:
164
- return f"Search error for '{query}': {str(e)}"
 
165
 
166
- # --- Code Interpreter Tool ---
167
  class CodeInput(BaseModel):
168
- code: str = Field(description="Python code to execute (must include print() for output).")
169
 
170
  @tool(args_schema=CodeInput)
171
  def code_interpreter(code: str) -> str:
172
  """
173
- Execute Python code and return output.
174
-
175
- RULES:
176
- 1. ALWAYS use print() for output
177
- 2. Keep code simple and focused
178
- 3. Add comments to explain logic
179
- 4. Import libraries inside functions
180
- 5. Available: pandas as pd, basic Python libraries
181
  """
182
  if not isinstance(code, str):
183
- return "Error: 'code' must be a string"
184
 
185
- # Security validation
186
- error = SecurityValidator.validate_code(code)
187
- if error:
188
- return f"Error: {error}"
 
 
189
 
190
- print(f"💻 Executing code:\n{code}\n---")
 
191
 
 
192
  output_stream = io.StringIO()
193
  error_stream = io.StringIO()
194
 
195
  try:
196
- with contextlib.redirect_stdout(output_stream), \
197
- contextlib.redirect_stderr(error_stream):
198
-
199
  safe_globals = {
200
  "pd": pd,
201
  "__builtins__": __builtins__
202
  }
203
  exec(code, safe_globals, {})
204
-
205
  stdout = output_stream.getvalue()
206
  stderr = error_stream.getvalue()
207
 
208
  if stderr:
209
- return f"Error:\n{stderr}\n\nOutput:\n{stdout}"
210
 
211
  if stdout:
212
- return f"Success:\n{FileUtils.truncate_if_needed(stdout)}"
 
 
213
 
214
- return "Success: Code executed but produced no output.\n⚠️ Use print() to see results!"
215
-
216
- except Exception:
217
- return f"Execution failed:\n{traceback.format_exc()}"
 
 
218
 
219
- # --- File Operations ---
220
  class ReadFileInput(BaseModel):
221
- path: str = Field(description="Path to the file to read.")
222
 
223
  @tool(args_schema=ReadFileInput)
224
  def read_file(path: str) -> str:
225
- """Read the content of a file."""
226
- if not path or not isinstance(path, str):
227
- return "Error: 'path' must be a non-empty string"
228
 
229
- print(f"📖 Reading: {path}")
230
 
231
- file_path = FileUtils.find_file(path)
232
  if not file_path:
233
- cwd_files = list(Path.cwd().iterdir())
234
  return (f"Error: File not found: '{path}'\n"
235
- f"Files in current directory: {[f.name for f in cwd_files]}")
236
 
237
  try:
238
- # Try reading as text
239
  content = file_path.read_text(encoding='utf-8')
240
- return FileUtils.truncate_if_needed(content)
241
-
242
  except UnicodeDecodeError:
243
- # Binary file
244
  size = file_path.stat().st_size
245
  ext = file_path.suffix
246
  return (f"File appears to be binary ({size} bytes). Cannot display as text.\n"
247
  f"File type: {ext}\n"
248
  f"Consider using audio_transcription_tool for audio files.")
249
-
250
  except Exception as e:
251
  return f"Error reading file: {str(e)}"
252
 
 
253
  class WriteFileInput(BaseModel):
254
- path: str = Field(description="Path to write the file.")
255
- content: str = Field(description="Content to write.")
256
 
257
  @tool(args_schema=WriteFileInput)
258
  def write_file(path: str, content: str) -> str:
259
- """Write content to a file."""
260
- if not path or not isinstance(path, str):
261
- return "Error: 'path' must be a non-empty string"
262
  if not isinstance(content, str):
263
- return "Error: 'content' must be a string"
264
 
265
- print(f"✍️ Writing to: {path}")
266
 
267
  try:
268
  file_path = Path.cwd() / path
269
  file_path.parent.mkdir(parents=True, exist_ok=True)
270
  file_path.write_text(content, encoding='utf-8')
271
- return f"Successfully wrote {len(content)} characters to '{path}'"
272
  except Exception as e:
273
- return f"Error writing file: {str(e)}"
 
274
 
275
  class ListDirInput(BaseModel):
276
- path: str = Field(description="Directory path to list.", default=".")
277
 
278
  @tool(args_schema=ListDirInput)
279
  def list_directory(path: str = ".") -> str:
280
- """List the contents of a directory."""
281
- print(f"📁 Listing: {path}")
282
 
283
  try:
284
  dir_path = Path.cwd() / path if path != "." else Path.cwd()
285
 
286
  if not dir_path.is_dir():
287
- return f"Error: '{path}' is not a valid directory"
288
 
289
  items = sorted(dir_path.iterdir())
290
 
291
  if not items:
292
- return f"Directory '{path}' is empty"
293
 
294
- files = []
295
- directories = []
296
 
297
  for item in items:
298
  if item.is_dir():
@@ -308,48 +270,52 @@ def list_directory(path: str = ".") -> str:
308
  result += "Files:\n" + "\n".join(files)
309
 
310
  return result
311
-
312
  except Exception as e:
313
- return f"Error listing directory: {str(e)}"
 
314
 
315
- # --- Audio Transcription ---
316
  class AudioInput(BaseModel):
317
- file_path: str = Field(description="Path to the audio file.")
318
 
319
  @tool(args_schema=AudioInput)
320
  def audio_transcription_tool(file_path: str) -> str:
321
- """Transcribe an audio file to text using Whisper."""
322
- if not file_path or not isinstance(file_path, str):
323
- return "Error: 'file_path' must be a non-empty string"
324
 
325
- print(f"🎤 Transcribing: {file_path}")
326
 
327
- audio_path = FileUtils.find_file(file_path)
 
 
 
328
  if not audio_path:
329
  return f"Error: Audio file not found: '{file_path}'"
330
 
331
  try:
332
- text = asr_manager.transcribe(str(audio_path))
333
- if not text:
334
- return "Error: Transcription produced no text"
335
- return f"Transcription:\n{FileUtils.truncate_if_needed(text)}"
 
 
 
336
  except Exception as e:
337
- return f"Error transcribing: {str(e)}"
 
338
 
339
- # --- YouTube Transcript ---
340
  class YoutubeInput(BaseModel):
341
- video_url: str = Field(description="YouTube video URL.")
342
 
343
  @tool(args_schema=YoutubeInput)
344
  def get_youtube_transcript(video_url: str) -> str:
345
- """Fetch transcript/captions for a YouTube video."""
346
- if not video_url or not isinstance(video_url, str):
347
- return "Error: 'video_url' must be a non-empty string"
348
 
349
- print(f"📺 Fetching transcript: {video_url}")
350
 
351
  try:
352
- # Extract video ID
353
  video_id = None
354
  if "watch?v=" in video_url:
355
  video_id = video_url.split("v=")[1].split("&")[0]
@@ -357,46 +323,40 @@ def get_youtube_transcript(video_url: str) -> str:
357
  video_id = video_url.split("youtu.be/")[1].split("?")[0]
358
 
359
  if not video_id:
360
- return f"Error: Could not extract video ID from '{video_url}'"
361
-
362
  transcript_list = YouTubeTranscriptApi.get_transcript(video_id)
363
 
364
  if not transcript_list:
365
- return "Error: No transcript found"
366
-
367
- full_transcript = " ".join(item["text"] for item in transcript_list)
368
- return f"YouTube Transcript:\n{FileUtils.truncate_if_needed(full_transcript)}"
369
-
370
  except Exception as e:
371
- return f"Error getting transcript: {str(e)}"
 
372
 
373
- # --- RAG-Based Web Scraper ---
374
  class ScrapeInput(BaseModel):
375
- url: str = Field(description="URL to scrape (http:// or https://).")
376
- query: str = Field(description="Question to answer from the page.")
377
 
378
  @tool(args_schema=ScrapeInput)
379
  def scrape_and_retrieve(url: str, query: str) -> str:
380
  """
381
- Scrape a webpage and use RAG to find relevant information.
382
-
383
- Args:
384
- url: The URL to scrape
385
- query: The specific question to answer
386
  """
387
- if not url.lower().startswith(('http://', 'https://')):
388
  return f"Error: Invalid URL. Must start with http:// or https://. Got: '{url}'"
389
  if not query:
390
- return "Error: A query is required"
391
 
392
  # Access global agent for RAG components
393
- if not hasattr(scrape_and_retrieve, 'embeddings'):
394
- return "Error: RAG components not initialized"
395
-
396
- print(f"🌐 Scraping: {url} for query: {query}")
397
 
398
  try:
399
- # Scrape webpage
400
  headers = {
401
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
402
  }
@@ -404,156 +364,606 @@ def scrape_and_retrieve(url: str, query: str) -> str:
404
  response.raise_for_status()
405
 
406
  soup = BeautifulSoup(response.text, 'html.parser')
407
-
408
- # Remove non-content elements
409
  for tag in soup(["script", "style", "nav", "footer", "aside", "header"]):
410
  tag.extract()
411
 
412
- # Extract main content
413
  main_content = soup.find('main') or soup.find('article') or soup.body
414
  if not main_content:
415
- return "Error: Could not find main content"
416
 
417
  text = main_content.get_text(separator='\n', strip=True)
418
- text = '\n'.join(line.strip() for line in text.splitlines() if line.strip())
419
 
420
  if not text:
421
- return "Error: No content extracted"
422
 
423
- # Split text into chunks
424
  docs = scrape_and_retrieve.text_splitter.create_documents([text])
425
  if not docs:
426
- return "Error: Could not split text into documents"
427
-
428
- # Create vector store and retrieve
429
  db = FAISS.from_documents(docs, scrape_and_retrieve.embeddings)
430
  retriever = db.as_retriever(search_kwargs={"k": 5})
431
  retrieved_docs = retriever.invoke(query)
432
 
433
  if not retrieved_docs:
434
- return "Error: No relevant information found"
435
-
436
- # Format results
437
- context = "\n\n---\n\n".join(doc.page_content for doc in retrieved_docs)
438
- return f"Relevant Context from {url} for '{query}':\n\n{context}"
439
-
440
  except Exception as e:
441
- return f"Error scraping {url}: {str(e)}\n{traceback.format_exc()}"
 
 
442
 
443
- # --- Final Answer Tool ---
444
  class FinalAnswerInput(BaseModel):
445
- answer: str = Field(description="The final, definitive answer.")
446
 
447
  @tool(args_schema=FinalAnswerInput)
448
  def final_answer_tool(answer: str) -> str:
449
  """
450
- Call this ONLY when you have the final answer.
451
- The answer must be EXACTLY what was asked for, with no extra text.
452
  """
453
  if not isinstance(answer, str):
454
- answer = str(answer)
 
 
 
455
 
456
- print(f" FINAL ANSWER: {answer}")
 
457
  return answer
458
 
 
459
  # =============================================================================
460
- # AGENT CLASS
461
  # =============================================================================
462
- class GroqAgent:
463
- """Agent powered by Groq's Llama 70B model."""
464
-
465
- def __init__(self, api_key: str):
466
- self.api_key = api_key
467
-
468
- # Initialize LLM
469
- self.llm = ChatGroq(
470
- api_key=api_key,
471
- model=Config.GROQ_MODEL,
472
- temperature=0.1,
473
- max_tokens=4096
474
- )
475
-
476
- # Initialize RAG components
477
- self.embeddings = HuggingFaceEmbeddings(
478
- model_name=Config.EMBEDDING_MODEL
479
- )
480
- self.text_splitter = RecursiveCharacterTextSplitter(
481
- chunk_size=Config.CHUNK_SIZE,
482
- chunk_overlap=Config.CHUNK_OVERLAP
483
- )
484
-
485
- # Attach RAG components to scraper tool
486
- scrape_and_retrieve.embeddings = self.embeddings
487
- scrape_and_retrieve.text_splitter = self.text_splitter
488
-
489
- # Define tools
490
- self.tools = [
491
- search_tool,
492
- code_interpreter,
493
- read_file,
494
- write_file,
495
- list_directory,
496
- audio_transcription_tool,
497
- get_youtube_transcript,
498
- scrape_and_retrieve,
499
- final_answer_tool
500
- ]
501
-
502
- # Bind tools to LLM
503
- self.llm_with_tools = self.llm.bind_tools(self.tools)
504
-
505
- # Build graph
506
- self.graph = self._build_graph()
507
-
508
- def _build_graph(self) -> StateGraph:
509
- """Build the LangGraph state graph."""
510
- # TODO: Implement graph building logic
511
- pass
512
-
513
- def run(self, user_input: str) -> str:
514
- """Run the agent on user input."""
515
- # TODO: Implement agent execution logic
516
- pass
 
 
 
 
 
 
 
 
 
 
 
 
517
 
518
  # =============================================================================
519
- # GRADIO INTERFACE
520
  # =============================================================================
521
- def create_interface():
522
- """Create the Gradio interface."""
523
-
524
- def chat(message, history, groq_api_key):
525
- if not groq_api_key:
526
- return "Please provide a Groq API key"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
527
 
 
 
 
 
 
 
 
 
528
  try:
529
- agent = GroqAgent(api_key=groq_api_key)
530
- response = agent.run(message)
531
- return response
532
- except Exception as e:
533
- return f"Error: {str(e)}"
534
-
535
- with gr.Blocks(title="Groq Llama 70B Agent") as demo:
536
- gr.Markdown("# 🚀 Groq Llama 70B Agentic Assistant")
537
- gr.Markdown("Powered by Groq's ultra-fast Llama 70B model")
538
-
539
- with gr.Row():
540
- api_key_input = gr.Textbox(
541
- label="Groq API Key",
542
- type="password",
543
- placeholder="Enter your Groq API key..."
544
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
545
 
546
- chatbot = gr.Chatbot(label="Chat", height=500)
547
- msg = gr.Textbox(
548
- label="Message",
549
- placeholder="Ask me anything...",
550
- lines=2
 
 
 
 
 
551
  )
552
 
553
- msg.submit(chat, [msg, chatbot, api_key_input], chatbot)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
554
 
555
- return demo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
556
 
557
  if __name__ == "__main__":
558
- demo = create_interface()
559
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  import re
5
  import traceback
6
  import contextlib
7
+ import uuid
8
+ import time
9
+ import ast
10
+ from typing import List, Optional, TypedDict, Annotated
11
  from pathlib import Path
12
 
13
  import gradio as gr
 
23
 
24
  # LangChain & LangGraph
25
  from langgraph.graph.message import add_messages
26
+ from langchain_core.messages import HumanMessage, AIMessage, ToolMessage, SystemMessage, AnyMessage
27
+ from langchain_core.tools import tool, ToolCall
28
  from langgraph.prebuilt import ToolNode
29
  from langgraph.graph import START, END, StateGraph
30
+ from langchain_groq import ChatGroq
 
31
 
32
  # RAG
33
  from langchain_text_splitters import RecursiveCharacterTextSplitter
 
38
  # =============================================================================
39
  # CONFIGURATION
40
  # =============================================================================
41
# Module-wide configuration constants.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"  # scoring endpoint for the agents course
MAX_TURNS = 20  # upper bound on agent reasoning turns per question
MAX_MESSAGE_LENGTH = 8000  # truncation threshold (chars) for tool outputs fed back to the LLM
 
 
 
 
 
 
44
 
45
  # =============================================================================
46
  # ASR INITIALIZATION
47
  # =============================================================================
48
# Load the Whisper ASR pipeline once at import time. On failure the tools
# degrade gracefully: asr_pipeline stays None and audio transcription
# returns an error string instead of crashing the app.
asr_pipeline = None
try:
    print("Loading ASR (Whisper) pipeline globally...")
    # transformers pipeline convention: device 0 = first CUDA GPU, -1 = CPU
    device = 0 if torch.cuda.is_available() else -1
    device_name = "cuda:0" if device == 0 else "cpu"
    print(f"Attempting to use device: {device_name} for ASR.")
    asr_pipeline = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-base",
        # fp16 is only safe on GPU; CPU inference needs fp32
        torch_dtype=torch.float16 if device == 0 else torch.float32,
        device=device
    )
    print(" ASR (Whisper) pipeline loaded successfully.")
except Exception as e:
    print(f"⚠️ Warning: Could not load ASR pipeline globally. Error: {e}")
    asr_pipeline = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
  # =============================================================================
66
  # UTILITY FUNCTIONS
67
  # =============================================================================
68
def remove_fences_simple(text):
    """Strip a surrounding triple-backtick code fence from *text*.

    Handles an optional short language tag (e.g. ``python``) on the first
    line inside the fence. Returns the input unchanged when it is not
    fenced.
    """
    stripped = text.strip()
    if stripped.startswith("```") and stripped.endswith("```"):
        inner = stripped[3:-3].strip()
        if '\n' in inner:
            first_line, rest = inner.split('\n', 1)
            # A short alphanumeric first line (underscores allowed) is
            # treated as a language tag and dropped.
            if first_line.strip().replace('_', '').isalnum() and len(first_line.strip()) < 15:
                return rest.strip()
        # Bug fix: previously, single-line fenced content (no newline after
        # stripping) fell through and returned the ORIGINAL still-fenced
        # text. Always return the de-fenced content here.
        return inner
    return text
80
+
81
def truncate_if_needed(content: str, max_length: int = MAX_MESSAGE_LENGTH) -> str:
    """Clip *content* to *max_length* characters, noting the original size."""
    if len(content) <= max_length:
        return content
    return f"{content[:max_length]}\n...[truncated, {len(content)} total chars]"
86
+
87
def find_file(path: str) -> Optional[Path]:
    """Resolve *path* against several candidate locations.

    Tries, in order: the path relative to the current working directory,
    the path exactly as given, and finally just the filename within the
    CWD. Returns the first candidate that exists, or None.
    """
    base = Path.cwd()
    posix_path = Path(path).as_posix()

    candidates = (
        base / posix_path,
        Path(posix_path),
        base / Path(path).name,
    )
    return next((candidate for candidate in candidates if candidate.exists()), None)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
 
104
  # =============================================================================
105
  # TOOL DEFINITIONS
106
  # =============================================================================
107
 
 
108
class SearchInput(BaseModel):
    query: str = Field(description="The search query.")

@tool(args_schema=SearchInput)
def search_tool(query: str) -> str:
    """Calls DuckDuckGo search and returns the results. Use this for recent information or general web searches."""
    if not isinstance(query, str) or not query.strip():
        return "Error: Invalid input. 'query' must be a non-empty string."

    print(f"--- Calling Search Tool with query: {query} ---")
    try:
        search = DuckDuckGoSearchRun()
        result = search.run(query)
        # Consistency fix: reuse the shared truncate_if_needed helper
        # (as read_file does) instead of duplicating the length-limiting
        # logic inline. Same cap (MAX_MESSAGE_LENGTH) and same suffix text.
        return truncate_if_needed(result)
    except Exception as e:
        return f"Error running search for '{query}': {str(e)}"
126
+
127
 
 
128
class CodeInput(BaseModel):
    # The schema description doubles as usage instructions for the LLM.
    code: str = Field(description="The Python code to execute, which must include a print() statement for output.")

@tool(args_schema=CodeInput)
def code_interpreter(code: str) -> str:
    """
    Executes a string of Python code and returns its stdout, stderr, and any error.
    CRITICAL RULES:
    1. ALWAYS use print() to output your final answer.
    2. Write simple, focused code. One task per execution.
    3. Add comments (#) to explain your logic.
    4. SCOPE RULE: Import all necessary libraries inside any function you define.
    Available: pandas as pd, basic Python libraries.
    """
    if not isinstance(code, str):
        return "Error: Invalid input. 'code' must be a string."

    # Basic safety checks
    # NOTE(review): substring matching is best-effort denylisting, not a
    # sandbox — e.g. getattr/chr tricks can still bypass it.
    dangerous_patterns = ['__import__', 'eval(', 'compile(', 'subprocess', 'os.system']
    code_lower = code.lower()
    for pattern in dangerous_patterns:
        if pattern in code_lower:
            return f"Error: Potentially dangerous operation '{pattern}' is not allowed."

    # Block file writes here; the dedicated write_file tool handles those.
    if 'open(' in code_lower and any(mode in code for mode in ["'w'", '"w"', "'a'", '"a"', "'wb'", '"wb"']):
        return "Error: Writing files is not allowed in code_interpreter. Use write_file tool instead."

    print(f"--- Calling Code Interpreter ---\nCode:\n{code}\n---")
    output_stream = io.StringIO()
    error_stream = io.StringIO()

    try:
        # Capture everything the executed code prints to stdout/stderr.
        with contextlib.redirect_stdout(output_stream), contextlib.redirect_stderr(error_stream):
            # Restricted globals: only pandas and builtins are pre-bound.
            safe_globals = {
                "pd": pd,
                "__builtins__": __builtins__
            }
            exec(code, safe_globals, {})

        stdout = output_stream.getvalue()
        stderr = error_stream.getvalue()

        # Anything on stderr is surfaced as an error, with stdout attached
        # for context.
        if stderr:
            return f"Error in execution:\n{stderr}\n\nStdout (if any):\n{stdout}"

        if stdout:
            # Truncate long output so the message stays within model limits.
            if len(stdout) > MAX_MESSAGE_LENGTH:
                stdout = stdout[:MAX_MESSAGE_LENGTH] + f"\n...[truncated, {len(stdout)} total chars]"
            return f"Success:\n{stdout}"

        return "Success: Code executed without error but produced no output.\n⚠️ Remember to use print() to output your results!"

    except Exception as e:
        # Full traceback helps the LLM self-correct its code.
        tb_str = traceback.format_exc()
        return f"Execution failed:\n{tb_str}"
+ return f"Execution failed:\n{tb_str}"
183
+
184
 
 
185
class ReadFileInput(BaseModel):
    path: str = Field(description="The path to the file to read.")

@tool(args_schema=ReadFileInput)
def read_file(path: str) -> str:
    """Reads the content of a file at the specified path."""
    if not isinstance(path, str) or not path.strip():
        return "Error: Invalid input. 'path' must be a non-empty string."

    print(f"--- Calling Read File Tool: {path} ---")

    file_path = find_file(path)
    if not file_path:
        # Fix: use pathlib (consistent with the rest of this module)
        # instead of os.listdir — no 'import os' is visible in the import
        # block, so the old call risked a NameError.
        cwd_files = [p.name for p in Path.cwd().iterdir()]
        return (f"Error: File not found: '{path}'\n"
                f"Files in current directory: {cwd_files}")

    try:
        content = file_path.read_text(encoding='utf-8')
        return truncate_if_needed(content)
    except UnicodeDecodeError:
        # Binary payloads cannot be rendered as text; report metadata instead.
        size = file_path.stat().st_size
        ext = file_path.suffix
        return (f"File appears to be binary ({size} bytes). Cannot display as text.\n"
                f"File type: {ext}\n"
                f"Consider using audio_transcription_tool for audio files.")
    except Exception as e:
        return f"Error reading file: {str(e)}"
213
 
214
+
215
class WriteFileInput(BaseModel):
    path: str = Field(description="The path of the file to write to.")
    content: str = Field(description="The content to write into the file.")

@tool(args_schema=WriteFileInput)
def write_file(path: str, content: str) -> str:
    """Writes content to a file at the specified path."""
    if not isinstance(path, str) or not path.strip():
        return "Error: Invalid input. 'path' must be a non-empty string."
    if not isinstance(content, str):
        return "Error: Invalid input. 'content' must be a string."

    print(f"--- Calling Write File Tool: {path} ---")

    try:
        target = Path.cwd() / path
        # Create any missing parent directories before writing.
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content, encoding='utf-8')
    except Exception as e:
        return f"Error writing file '{path}': {str(e)}"
    return f"Successfully wrote {len(content)} characters to '{path}'."
236
+
237
 
238
  class ListDirInput(BaseModel):
239
+ path: str = Field(description="The directory path to list.", default=".")
240
 
241
  @tool(args_schema=ListDirInput)
242
  def list_directory(path: str = ".") -> str:
243
+ """Lists the contents of a directory."""
244
+ print(f"--- Calling List Directory Tool: {path} ---")
245
 
246
  try:
247
  dir_path = Path.cwd() / path if path != "." else Path.cwd()
248
 
249
  if not dir_path.is_dir():
250
+ return f"Error: '{path}' is not a valid directory."
251
 
252
  items = sorted(dir_path.iterdir())
253
 
254
  if not items:
255
+ return f"Directory '{path}' is empty."
256
 
257
+ files, directories = [], []
 
258
 
259
  for item in items:
260
  if item.is_dir():
 
270
  result += "Files:\n" + "\n".join(files)
271
 
272
  return result
 
273
  except Exception as e:
274
+ return f"Error listing directory '{path}': {str(e)}"
275
+
276
 
 
277
class AudioInput(BaseModel):
    file_path: str = Field(description="The file path of the audio to transcribe.")

@tool(args_schema=AudioInput)
def audio_transcription_tool(file_path: str) -> str:
    """Transcribes an audio file to text using Whisper."""
    if not isinstance(file_path, str) or not file_path.strip():
        return "Error: Invalid input. 'file_path' must be a non-empty string."

    print(f"--- Calling Audio Transcription: {file_path} ---")

    # Guard clauses: the global pipeline may have failed to load at import
    # time, and the path may not resolve to an existing file.
    if asr_pipeline is None:
        return "Error: ASR pipeline is not available."

    audio_path = find_file(file_path)
    if audio_path is None:
        return f"Error: Audio file not found: '{file_path}'"

    try:
        result_text = asr_pipeline(str(audio_path)).get("text", "")
    except Exception as e:
        return f"Error transcribing '{file_path}': {str(e)}"

    if not result_text:
        return "Error: Transcription produced no text."
    return f"Transcription:\n{truncate_if_needed(result_text)}"
305
+
306
 
 
307
  class YoutubeInput(BaseModel):
308
+ video_url: str = Field(description="The URL of the YouTube video.")
309
 
310
  @tool(args_schema=YoutubeInput)
311
  def get_youtube_transcript(video_url: str) -> str:
312
+ """Fetches the transcript/captions for a YouTube video."""
313
+ if not isinstance(video_url, str) or not video_url.strip():
314
+ return "Error: Invalid input. 'video_url' must be a non-empty string."
315
 
316
+ print(f"--- Calling YouTube Transcript: {video_url} ---")
317
 
318
  try:
 
319
  video_id = None
320
  if "watch?v=" in video_url:
321
  video_id = video_url.split("v=")[1].split("&")[0]
 
323
  video_id = video_url.split("youtu.be/")[1].split("?")[0]
324
 
325
  if not video_id:
326
+ return f"Error: Could not extract YouTube video ID from '{video_url}'."
327
+
328
  transcript_list = YouTubeTranscriptApi.get_transcript(video_id)
329
 
330
  if not transcript_list:
331
+ return "Error: No transcript found for this video."
332
+
333
+ full_transcript = " ".join([item["text"] for item in transcript_list])
334
+ return f"YouTube Transcript:\n{truncate_if_needed(full_transcript)}"
 
335
  except Exception as e:
336
+ return f"Error getting transcript for '{video_url}': {str(e)}"
337
+
338
 
 
339
class ScrapeInput(BaseModel):
    url: str = Field(description="The URL to scrape (must start with http:// or https://).")
    query: str = Field(description="The specific question to answer or information to find on the page.")


@tool(args_schema=ScrapeInput)
def scrape_and_retrieve(url: str, query: str) -> str:
    """
    Scrapes a webpage, chunks its content, and performs RAG search.

    Requires `scrape_and_retrieve.embeddings` and
    `scrape_and_retrieve.text_splitter` to have been attached beforehand
    (done in BasicAgent.__init__). Returns the top-5 most relevant chunks
    for `query`, or an "Error: ..." string on any failure.
    """
    if not (url.lower().startswith(('http://', 'https://'))):
        return f"Error: Invalid URL. Must start with http:// or https://. Got: '{url}'"
    if not query:
        return "Error: A query is required to search the page content."

    # RAG components are injected as function attributes by the agent at startup.
    if not hasattr(scrape_and_retrieve, 'embeddings') or not hasattr(scrape_and_retrieve, 'text_splitter'):
        return "Error: RAG components are not initialized."

    print(f"--- Calling RAG Scraper: {url} for query: {query} ---")

    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        # Explicit timeout so a hung server cannot stall the whole agent turn.
        response = requests.get(url, headers=headers, timeout=20)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        # Strip boilerplate elements before extracting text.
        for tag in soup(["script", "style", "nav", "footer", "aside", "header"]):
            tag.extract()

        # Prefer semantic containers; fall back to the whole body.
        main_content = soup.find('main') or soup.find('article') or soup.body
        if not main_content:
            return "Error: Could not find main content on the page."

        text = main_content.get_text(separator='\n', strip=True)
        # Drop blank lines left over from layout markup.
        text = '\n'.join(chunk for chunk in (line.strip() for line in text.splitlines()) if chunk)

        if not text:
            return "Error: Scraped content was empty."

        docs = scrape_and_retrieve.text_splitter.create_documents([text])
        if not docs:
            return "Error: Text could not be split into documents."

        # Build an ephemeral in-memory index per call; pages are small enough
        # that re-embedding each time is acceptable.
        db = FAISS.from_documents(docs, scrape_and_retrieve.embeddings)
        retriever = db.as_retriever(search_kwargs={"k": 5})
        retrieved_docs = retriever.invoke(query)

        if not retrieved_docs:
            return "Error: No relevant information found on the page for that query."

        context = "\n\n---\n\n".join([doc.page_content for doc in retrieved_docs])
        return f"Relevant Context from {url} for query '{query}':\n\n{context}"

    except Exception as e:
        # Include the traceback so the agent (and logs) can see what failed.
        tb_str = traceback.format_exc()
        return f"Error scraping or retrieving from {url}: {str(e)}\n{tb_str}"
398
 
 
399
class FinalAnswerInput(BaseModel):
    answer: str = Field(description="The final, definitive answer to the question.")


@tool(args_schema=FinalAnswerInput)
def final_answer_tool(answer: str) -> str:
    """
    Call this tool ONLY when you have the final, definitive answer.
    The 'answer' must be EXACTLY what was asked for, with no extra text.
    """
    # Coerce non-string answers (e.g. ints from the LLM) to str.
    if not isinstance(answer, str):
        try:
            answer = str(answer)
        except Exception:
            # BUGFIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            return "Error: Invalid input. 'answer' must be a string."

    # Plain string (was an f-string with no placeholders).
    print("--- FINAL ANSWER TOOL CALLED ---")
    print(f"Answer: {answer}")
    return answer
419
# =============================================================================
# FALLBACK PARSER
# =============================================================================
def parse_tool_call_from_string(content: str, tools: List) -> List[ToolCall]:
    """
    Parses malformed tool call strings from an LLM response.

    Some model outputs wrap the call in a pseudo-tag like
    ``<function(tool_name)>{...json args...}``. This extracts the tool name and
    its argument dict (trying ``json.loads`` first, then ``ast.literal_eval``
    for Python-literal-style dicts), validates the name against `tools`, and
    returns a single-element list of ToolCall, or [] if nothing parseable.
    """
    print(f"Original LLM content for fallback parsing:\n---\n{content}\n---")
    tool_name = None
    tool_input = None
    cleaned_str = None

    # STRATEGY 1: Try to parse <function(tool_name)>...{json_string}...
    func_match = re.search(
        r"<function[(=]\s*([^)]+)\s*[)>](.*)",
        content,
        re.DOTALL | re.IGNORECASE
    )

    if func_match:
        try:
            tool_name = func_match.group(1).strip().replace("'", "").replace('"', '')
            remaining_content = func_match.group(2)

            json_start_index = remaining_content.find('{')
            if json_start_index != -1:
                json_str = remaining_content[json_start_index:]
                cleaned_str = json_str.strip()
                # Strip non-printable junk the model sometimes emits,
                # then a trailing comma that would break json.loads.
                cleaned_str = ''.join(c for c in cleaned_str if c.isprintable() or c in '\n\r\t')
                cleaned_str = cleaned_str.strip().rstrip(',')

                tool_input = json.loads(cleaned_str)
                print(f"🔧 Fallback (Format 1 - json.loads): Parsed tool call for '{tool_name}'")
            else:
                print(f"⚠️ Fallback (Format 1): Found <function> but no JSON blob.")
                tool_name = None

        except json.JSONDecodeError as e:
            print(f"⚠️ Fallback (Format 1): json.loads failed: {e}. Trying ast.literal_eval.")
            try:
                if cleaned_str:
                    potential_input = ast.literal_eval(cleaned_str)
                    if isinstance(potential_input, dict):
                        tool_input = potential_input
                        print(f"🔧 Fallback (Format 1 - ast.literal_eval): Parsed tool call for '{tool_name}'")
                    else:
                        print(f"⚠️ Fallback (Format 1): ast.literal_eval did not produce a dict.")
                        tool_name = None
                else:
                    tool_name = None
            except (ValueError, SyntaxError, TypeError, MemoryError, RecursionError):
                # BUGFIX: was a bare `except:`; narrowed to the exceptions
                # ast.literal_eval can raise on bad input.
                tool_name = None

    # FINAL VALIDATION: only emit a call for a tool we actually have.
    if tool_name and tool_input is not None:
        if any(t.name == tool_name for t in tools):
            tool_call = ToolCall(
                name=tool_name,
                args=tool_input,
                id=str(uuid.uuid4())
            )
            print(f"✅ Successfully created tool call: {tool_name}")
            return [tool_call]
        else:
            print(f"❌ Tool '{tool_name}' not found in available tools")
            print(f"   Available: {[t.name for t in tools]}")

    print("❌ Failed to parse any valid tool call from content")
    return []
489
 
490
# =============================================================================
# DEFINED TOOLS LIST
# =============================================================================
# Every tool the agent may call. Bound to the LLM in BasicAgent.__init__
# (via bind_tools) and executed through LangGraph's ToolNode.
# NOTE(review): search_tool, code_interpreter, read_file, write_file,
# list_directory and audio_transcription_tool are defined earlier in this file
# (outside this view).
defined_tools = [
    search_tool,
    code_interpreter,
    read_file,
    write_file,
    list_directory,
    audio_transcription_tool,
    get_youtube_transcript,
    scrape_and_retrieve,
    final_answer_tool
]
505
+
506
# =============================================================================
# AGENT STATE
# =============================================================================
class AgentState(TypedDict):
    # Conversation history; the add_messages reducer appends new messages to
    # the existing list instead of replacing it on each graph step.
    messages: Annotated[List[AnyMessage], add_messages]
    # Number of agent turns taken so far; used by should_continue / agent_node
    # to enforce the MAX_TURNS limit.
    turn: int
513
+
514
# =============================================================================
# CONDITIONAL EDGE FUNCTION
# (BUGFIX: the closing banner line above was missing its leading '#',
#  which is a SyntaxError.)
# =============================================================================
def should_continue(state: AgentState):
    """
    Decide whether to continue, call tools, or end.

    Routing rules, in priority order:
      1. final_answer_tool was called       -> END
      2. turn limit reached                 -> END
      3. any tool call present              -> "tools"
      4. two consecutive AI messages        -> END (loop prevention)
      5. otherwise                          -> "agent"
    """
    last_message = state['messages'][-1]
    current_turn = state.get('turn', 0)

    # 1. Check for final_answer_tool
    if isinstance(last_message, AIMessage) and last_message.tool_calls:
        for tool_call in last_message.tool_calls:
            if tool_call.get("name") == "final_answer_tool":
                print("--- Condition: final_answer_tool called, ending. ---")
                return END

    # 2. Check turn limit
    if current_turn >= MAX_TURNS:
        print(f"--- Condition: Max turns ({MAX_TURNS}) reached. Ending. ---")
        return END

    # 3. Route to tools if tool calls exist
    if isinstance(last_message, AIMessage) and last_message.tool_calls:
        print("--- Condition: Tools called, routing to tools node. ---")
        return "tools"

    # 4. Loop prevention: two AI messages in a row means the model is
    #    "reasoning" without acting; bail out rather than spin.
    if len(state['messages']) > 2 and isinstance(last_message, AIMessage) and isinstance(state['messages'][-2], AIMessage):
        print(f"--- Condition: Detected 2+ consecutive AI messages (Turn {current_turn}). Ending to prevent loop. ---")
        return END

    # 5. Loop back to agent
    print(f"--- Condition: No tool call (Turn {current_turn}). Continuing to agent. ---")
    return "agent"
550
+
551
# =============================================================================
# BASIC AGENT CLASS
# =============================================================================
class BasicAgent:
    """Single-LLM ReAct-style agent: a Groq-hosted Llama 3.3 model bound to
    the tools in `defined_tools`, driven by a two-node LangGraph
    (agent -> tools -> agent) until final_answer_tool is called or the turn
    limit is hit."""

    def __init__(self):
        """Build the LLM, RAG components, and the LangGraph state machine.

        Raises:
            ValueError: if GROQ_API_KEY is not set in the environment.
            Exception: re-raised if the Groq client cannot be created.
        """
        print("BasicAgent (Single LLM) initializing...")

        GROQ_API_KEY = os.getenv("GROQ_API_KEY")
        if not GROQ_API_KEY:
            raise ValueError("GROQ_API_KEY environment variable is not set!")

        self.tools = defined_tools

        # Initialize RAG Components (embeddings + splitter for the scraper tool).
        # A failure here is non-fatal: the scraper tool will report
        # "RAG components are not initialized" at call time instead.
        print("Initializing RAG components...")
        try:
            self.embeddings = HuggingFaceEmbeddings(
                model_name="sentence-transformers/all-MiniLM-L6-v2",
                model_kwargs={'device': 'cpu'}
            )
            self.text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000,
                chunk_overlap=200
            )

            # Attach to scraper tool as function attributes — this is how
            # scrape_and_retrieve finds its RAG machinery.
            scrape_and_retrieve.embeddings = self.embeddings
            scrape_and_retrieve.text_splitter = self.text_splitter

            print("✅ RAG components initialized.")
        except Exception as e:
            print(f"⚠️ Warning: Could not initialize RAG components. Error: {e}")
            self.embeddings = None
            self.text_splitter = None

        # Build a human-readable tool catalogue for the system prompt,
        # including per-argument descriptions pulled from each pydantic schema.
        tool_desc_list = []
        for tool in self.tools:
            if tool.args_schema:
                schema = tool.args_schema.model_json_schema()
                args_desc = []
                for prop, details in schema.get('properties', {}).items():
                    desc = details.get('description', '')
                    args_desc.append(f" - {prop}: {desc}")
                args_str = "\n".join(args_desc)
                desc = f"- {tool.name}:\n {tool.description}\n Args:\n{args_str}"
            else:
                desc = f"- {tool.name}: {tool.description}"
            tool_desc_list.append(desc)
        tool_descriptions = "\n".join(tool_desc_list)

        # System Prompt: enforces one-tool-per-turn and the exact JSON call
        # format; the JSON examples double as a fallback format the string
        # parser (parse_tool_call_from_string) can recover.
        self.system_prompt = f"""You are a highly intelligent AI assistant for the GAIA benchmark.
Your goal: Provide the EXACT answer in the EXACT format requested.

**PROTOCOL:**
1. **ANALYZE:** Read the question and history. What is the next logical step?
2. **ACT:** Call ONE tool to get information or perform a calculation.
3. **EVALUATE:** Look at the tool's output. Do you have the final answer?
   - **If NO:** Go back to Step 1 and decide the *next* step.
   - **If YES:** Call final_answer_tool immediately with the answer.

**CRITICAL RULES:**
- **TOOL USE:** You MUST use tools to find the answer. Do NOT use your own knowledge.
- **FINAL ANSWER:** When you have the answer, use final_answer_tool. The 'answer' argument must be the answer ONLY (e.g., "42", "red, blue, green").
- **JSON FORMAT:** All tool calls MUST be in this exact JSON format:
  {{"name": "tool_name", "arguments": {{"key": "value"}}}}

**EXAMPLE: CODE INTERPRETER**
{{"name": "code_interpreter", "arguments": {{"code": "print(1 + 1)"}}}}

**EXAMPLE: FINAL ANSWER**
{{"name": "final_answer_tool", "arguments": {{"answer": "28"}}}}

**TOOLS:**
{tool_descriptions}

**REMEMBER:** One step at a time. Use tools. Format JSON correctly.
"""

        print("Initializing Groq LLM...")
        try:
            self.llm_with_tools = ChatGroq(
                temperature=0,
                groq_api_key=GROQ_API_KEY,
                model_name="llama-3.3-70b-versatile",
                max_tokens=4096,
                timeout=60
            ).bind_tools(self.tools)
            print("✅ Main LLM (llama-3.3-70b-versatile with tools) initialized.")

        except Exception as e:
            print(f"❌ Error initializing Groq: {e}")
            raise

        # Agent Node (closure over self so the compiled graph can call the LLM).
        def agent_node(state: AgentState):
            """One LLM turn: invoke the model (with retries), recover
            malformed tool calls from raw text, and return the new message
            plus the incremented turn counter."""
            current_turn = state.get('turn', 0) + 1
            print(f"\n{'='*60}")
            print(f"AGENT TURN {current_turn}/{MAX_TURNS}")
            print('='*60)

            if current_turn > MAX_TURNS:
                return {"messages": [SystemMessage(content="Max turns reached.")]}

            # Retry with exponential backoff; on total failure, synthesize an
            # error AIMessage so the graph can still route and terminate.
            max_retries = 3
            ai_message = None
            for attempt in range(max_retries):
                try:
                    ai_message = self.llm_with_tools.invoke(state["messages"])
                    break
                except Exception as e:
                    print(f"⚠️ LLM attempt {attempt+1}/{max_retries} failed: {e}")
                    if attempt == max_retries - 1:
                        ai_message = AIMessage(
                            content=f"Error: LLM failed after {max_retries} attempts: {e}"
                        )
                    time.sleep(2 ** attempt)

            # Fallback Parsing Logic: if the model emitted a tool call as plain
            # text instead of a structured call, try to rebuild it.
            if not ai_message.tool_calls and isinstance(ai_message.content, str) and ai_message.content.strip():
                parsed_tool_calls = parse_tool_call_from_string(ai_message.content, self.tools)
                if parsed_tool_calls:
                    print("🔧 Fallback SUCCESS: Rebuilding tool call(s).")
                    ai_message.tool_calls = parsed_tool_calls
                    # Clear content so downstream nodes treat this purely as a
                    # tool-calling message.
                    ai_message.content = ""
                else:
                    print(f"⚠️ Fallback FAILED: Could not parse any tool call from content:\n{ai_message.content[:200]}...")

            if ai_message.tool_calls:
                print(f"🔧 Agent Tool Call: {ai_message.tool_calls[0]['name']}")
            else:
                print(f"💭 Agent Reasoning: {ai_message.content[:200]}...")

            return {"messages": [ai_message], "turn": current_turn}

        # Tool Node: executes whichever tool(s) the last AIMessage requested.
        tool_node = ToolNode(self.tools)

        # Build Graph: START -> agent -> (tools | agent | END), tools -> agent.
        print("Building Single-Agent graph...")
        graph_builder = StateGraph(AgentState)

        graph_builder.add_node("agent", agent_node)
        graph_builder.add_node("tools", tool_node)

        graph_builder.add_edge(START, "agent")

        graph_builder.add_conditional_edges(
            "agent",
            should_continue,
            {
                "tools": "tools",
                "agent": "agent",
                END: END
            }
        )

        graph_builder.add_edge("tools", "agent")

        self.graph = graph_builder.compile()
        print("✅ Single-Agent graph compiled successfully.")

    def __call__(self, question: str) -> str:
        """Run the graph on one question and return the cleaned final answer.

        The final answer is captured from the *arguments* of the
        final_answer_tool call (the stream is stopped before the tool even
        executes), then stripped of common prefixes, code fences, and
        backticks. Returns an error string rather than raising.
        """
        print(f"\n--- Starting Agent Run for Question ---")
        print(f"Agent received question (first 100 chars): {question[:100]}...")

        graph_input = {
            "messages": [
                SystemMessage(content=self.system_prompt),
                HumanMessage(content=question)
            ],
            "turn": 0
        }

        final_answer = "AGENT FAILED TO PRODUCE ANSWER"
        try:
            # Small headroom over MAX_TURNS so the graph's own limit (in
            # should_continue) fires before LangGraph's recursion guard.
            config = {"recursion_limit": MAX_TURNS + 5}
            for event in self.graph.stream(graph_input, stream_mode="values", config=config):

                if event.get('messages'):  # Ensure messages exist
                    last_message = event["messages"][-1]
                else:
                    continue  # Skip if no messages yet

                # Check for final answer extraction
                if isinstance(last_message, AIMessage) and last_message.tool_calls:
                    if last_message.tool_calls[0].get("name") == "final_answer_tool":
                        final_answer_args = last_message.tool_calls[0].get('args', {})
                        if 'answer' in final_answer_args:
                            final_answer = final_answer_args['answer']
                            print(f"--- Final Answer Captured from tool call: '{final_answer}' ---")
                            break
                        else:
                            print(f"⚠️ Final Answer tool called without 'answer' argument: {final_answer_args}")
                            final_answer = "ERROR: FINAL_ANSWER_TOOL CALLED WITHOUT ANSWER"
                            break

                elif isinstance(last_message, ToolMessage):
                    print(f"Tool Result ({last_message.tool_call_id}): {last_message.content[:500]}...")
                elif isinstance(last_message, AIMessage) and not last_message.tool_calls:
                    print(f"AI Message (Reasoning): {last_message.content[:500]}...")
                elif isinstance(last_message, SystemMessage):
                    print(f"System Message: {last_message.content[:500]}...")


            # --- Final Answer Cleaning ---
            cleaned_answer = str(final_answer).strip()
            prefixes_to_remove = ["The answer is:", "Here is the answer:", "Based on the information:", "Final Answer:", "Answer:"]
            original_cleaned = cleaned_answer
            for prefix in prefixes_to_remove:
                if cleaned_answer.lower().startswith(prefix.lower()):
                    potential_answer = cleaned_answer[len(prefix):].strip()
                    if potential_answer:
                        cleaned_answer = potential_answer
                    break

            # remove_fences_simple is defined elsewhere in this file —
            # presumably strips markdown code fences; verify at its definition.
            cleaned_answer = remove_fences_simple(cleaned_answer)
            if cleaned_answer.startswith("`") and cleaned_answer.endswith("`"):
                cleaned_answer = cleaned_answer[1:-1].strip()

            print(f"Agent returning final answer (cleaned): '{cleaned_answer}'")
            return cleaned_answer

        except Exception as e:
            print(f"Error running agent graph: {e}")
            tb_str = traceback.format_exc()
            print(tb_str)
            return f"AGENT GRAPH ERROR: {e}"
781
+
782
# ====================================================
# --- Global Agent Instantiation ---
# Built once at import time so Gradio callbacks can reuse it; on any failure
# `agent` is left as None and run_and_submit_all reports a fatal error
# instead of crashing the app.

try:
    agent = BasicAgent()
    print("✅ Global BasicAgent instantiated successfully.")
    # asr_pipeline is the module-level Whisper pipeline created earlier in
    # this file; None indicates it failed to load.
    if asr_pipeline is None: print("⚠️ Global ASR Pipeline failed load.")
except Exception as e:
    print(f"❌ FATAL: Could not instantiate global agent: {e}")
    traceback.print_exc()
    agent = None
793
+
794
# ====================================================
# --- (Original Template Code - Mock Questions Version) ---
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches MOCK questions, runs the BasicAgent on them, simulates submission prep,
    and displays the results. DOES NOT SUBMIT.

    Args:
        profile: Gradio OAuth profile; None when running locally / logged out.

    Returns:
        Tuple of (status message string, pandas DataFrame of per-task results),
        or (error string, None) if the global agent failed to initialize.
    """
    space_id = os.getenv("SPACE_ID")
    username = profile.username if profile else "local_test_user"
    print(f"User: {username}{'' if profile else ' (dummy)'}")

    # Check if global agent initialized
    if not agent:
        return "FATAL ERROR: Global agent failed to initialize. Check logs.", None

    print("Using globally instantiated agent.")
    # BUGFIX: scheme was misspelled "httpsS://".
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "local_run"
    print(f"Agent code URL: {agent_code}")
    print("--- USING MOCK QUESTIONS ---")

    # --- MOCK QUESTIONS ---
    mock_questions_data = [
        {
            "task_id": "mock_level1_001",
            "question": r"""Here's a fun riddle that I'd like you to try.\n\nAn adventurer exploring an ancient tomb came across a horde of gold coins, all neatly stacked in columns. As he reached to scoop them into his backpack, a mysterious voice filled the room. \"You have fallen for my trap adventurer,\" the voice began, and suddenly the doorway to the chamber was sealed by a heavy rolling disk of stone. The adventurer tried to move the stone disk but was unable to budge the heavy stone. Trapped, he was startled when the voice again spoke. \n\n\"If you solve my riddle, I will reward you with a portion of my riches, but if you are not clever, you will never leave this treasure chamber. Before you are 200 gold coins. I pose a challenge to you, adventurer. Within these stacks of coins, all but 30 are face-up. You must divide the coins into two piles, one is yours, and one is mine. You may place as many coins as you like in either pile. You may flip any coins over, but you may not balance any coins on their edges. For every face-down coin in your pile, you will be rewarded with two gold coins. But be warned, if both piles do not contain the same number of face-down coins, the door will remain sealed for all eternity!\"\n\nThe adventurer smiled, as this would be an easy task. All he had to do was flip over every coin so it was face down, and he would win the entire treasure! As he moved to the columns of coins, however, the light suddenly faded, and he was left in total darkness. The adventurer reached forward and picked up one of the coins, and was shocked when he realized that both sides felt almost the same. Without the light, he was unable to determine which side of the coin was heads and which side was tails. He carefully replaced the coin in its original orientation and tried to think of a way to solve the puzzle. Finally, out of desperation, the adventurer removed 30 coins to create his pile. 
He then carefully flipped over each coin in his pile, so its orientation was inverted from its original state.\n\n\"I've finished,\" he said, and the lights returned. Looking at the two piles, he noticed that the larger pile contained 14 face-down coins.\n\nWhat was the outcome for the adventurer? If he failed the challenge, please respond with \"The adventurer died.\" Otherwise, please provide the number of coins the adventurer won at the conclusion of the riddle. If the adventurer won any coins, provide your response as the number of coins, with no other text."""
        },
        {
            "task_id": "mock_level1_002",
            "question": r"""If you use some of the letters in the given Letter Bank to spell out the sentence "I am a penguin halfway to the moon", which of the remaining unused letters would have to be changed to spell out, "The moon is made of cheese"? Return a comma-separated alphabetized list.\nLetter Bank: {OAMFETIMPECRFSHTDNIWANEPNOFAAIYOOMGUTNAHHLNEHCME}"""
        },
        {
            "task_id": "mock_level1_003",
            "question": r"""A data annotator stayed up too late creating test questions to check that a system was working properly and submitted several questions with mathematical errors. On nights when they created 15 test questions, they made 1 error. On nights when they created fewer than 15 questions, they also corrected 3 errors. On nights they created 20 questions, they made 0 errors. On nights when they created 25 or more, they made 4 errors. Over the course of five nights, the worker produced a total of 6 errors. When asked how many nights they created 15 questions, they gave three possible numbers as responses. What are the three numbers, presented in the format x, y, z in ascending order?"""
        },
        {
            "task_id": "mock_level1_004",
            "question": r"""Please solve the following crossword:\n\n|1|2|3|4|5|\n|6| | | | |\n|7| | | | |\n|8| | | | |\n|X|9| | | |\n\nI have indicated by numbers where the hints start, so you should replace numbers and spaces by the answers.\nAnd X denotes a black square that isn\u2019t to fill.\n\nACROSS\n- 1 Wooden strips on a bed frame\n- 6 _ Minhaj, Peabody-winning comedian for "Patriot Act"\n- 7 Japanese city of 2.6+ million\n- 8 Stopwatch, e.g.\n- 9 Pain in the neck\n\nDOWN\n- 1 Quick drink of whiskey\n- 2 Eye procedure\n- 3 "Same here," in a three-word phrase\n- 4 Already occupied, as a seat\n- 5 Sarcastically critical commentary. Answer by concatenating the characters you choose to fill the crossword, in row-major order."""
        },
        {
            "task_id": "mock_level1_005",
            "question": r"""I wanted to make another batch of cherry melomel. I remember liking the last recipe I tried, but I can't remember it off the top of my head. It was from the Reddit, r/mead. I remember that the user who made it had a really distinct name, I think it was StormBeforeDawn. Could you please look up the recipe for me? I'm not sure if it has been changed, so please make sure that the recipe you review wasn't updated after July 14, 2022. That's the last time I tried the recipe.\n\nWhat I want to know is how many cherries I'm supposed to use. I'm making a 10-gallon batch in two 5-gallon carboys. Please just respond with the integer number of pounds of whole cherries with pits that are supposed to be used for a 10-gallon batch."""
        },
        {
            "task_id": "mock_level1_006",
            "question": r"""Verify each of the following ISBN 13 numbers:\n\n1. 9783518188156\n2. 9788476540746\n3. 9788415091004\n4. 9788256014590\n5. 9782046407331\n\nIf any are invalid, correct them by changing the final digit. Then, return the list, comma separated, in the same order as in the question."""
        },
        {
            "task_id": "mock_level1_007",
            "question": r"""A porterhouse by any other name is centered around a letter. What does Three Dog Night think about the first natural number that starts with that letter? Give the first line from the lyrics that references it."""
        },
        {
            "task_id": "mock_level1_008",
            "question": r"""Bob has genome type Aa, and Linda has genome type Aa. Assuming that a child of theirs also has a child with someone who also has genome type Aa, what is the probability that Bob and Linda's grandchild will have Genome type Aa? Write the answer as a percentage, rounding to the nearest integer if necessary."""
        },
        {
            "task_id": "mock_level1_009",
            "question": r"""An array of candy is set out to choose from including gumballs, candy corn, gumdrops, banana taffy, chocolate chips, and gummy bears. There is one bag of each type of candy. The gumballs come in red, orange, yellow, green, blue, and brown. The candy corn is yellow, white, and orange. The gumdrops are red, green, purple, yellow, and orange. The banana taffy is yellow. The chocolate chips are brown and white. The gummy bears are red, green, yellow, and orange. Five people pass through and each selects one bag. The first selects one with only primary colors. The second selects one with no primary colors. The third selects one with all the primary colors. The fourth selects one that has neither the most nor the least colors of the remaining bags. The fifth selects the one with their favorite color, green. A second bag of the candy the first person chose is added to the remaining bag of candy. Which two candies are in the remaining bag after the addition? Give me them in a comma separated list, in alphabetical order"""
        },
        {
            "task_id": "mock_level1_010",
            "question": r"""In the year 2020, where were koi fish found in the watershed with the id 02040203? Give only the name of the pond, lake, or stream where the fish were found, and not the name of the city or county."""
        },
        {
            "task_id": "mock_level1_011",
            "question": r"""In Sonia Sanchez\u2019s poem \u201cfather\u2019s voice\u201d, what primary colour is evoked by the imagery in the beginning of the tenth stanza? Answer with a capitalized word."""
        },
        {
            "task_id": "mock_level1_012",
            "question": r"""According to Papers with Code, what was the name of the first model to go beyond 70% of accuracy on ImageNet ?"""
        },
        {
            "task_id": "mock_level1_013",
            "question": r"""What is the dimension of the boundary of the tame twindragon rounded to two decimal places?"""
        },
        {
            "task_id": "mock_level1_014",
            "question": r"""In what year was the home village of the subject of British Museum item #Bb,11.118 founded?"""
        },
        {
            "task_id": "mock_level1_015",
            "question": r"""What is the ISSN of the journal that included G. Scott's potato article that mentioned both a fast food restaurant and a Chinese politician in the title in a 2012 issue?"""
        },
        {
            "task_id": "mock_level1_016",
            "question": r"""VNV Nation has a song that shares its title with the nickname of Louis XV. What album was it released with?"""
        },
        {
            "task_id": "mock_level1_017",
            "question": r"""If I combine a Beatle's first name and a type of beer, in what category and year of Nobel Prize do I have a winner? Answer using the format CATEGORY, YEAR."""
        },
        {
            "task_id": "mock_level1_018",
            "question": r"""In the version of NumPy where the numpy.msort function was deprecated, which attribute was added to the numpy.polynomial package's polynomial classes?"""
        },
        {
            "task_id": "mock_level1_019",
            "question": r"""A word meaning dramatic or theatrical forms a species of duck when appended with two letters and then duplicated. What is that word?"""
        },
        {
            "task_id": "mock_level1_020",
            "question": r"""As of August 2023, how many in-text citations on the West African Vodun Wikipedia page reference a source that was cited using Scopus?"""
        }
    ]

    questions_data = mock_questions_data
    print(f"Using {len(questions_data)} mock questions.")

    results_log, answers_payload = [], []
    print(f"Running agent on {len(questions_data)} mock questions...")

    for i, item in enumerate(questions_data):
        task_id, question_text = item.get("task_id"), item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping mock item {i+1}")
            continue

        print(f"\n--- Running Mock Task {i+1} (ID: {task_id}) ---")
        try:
            # Optional per-task file attachment (none of the current mocks set it).
            file_path = item.get("file_path")
            question_text_with_context = question_text
            if file_path:
                question_text_with_context = f"{question_text}\n\n[Attached File: {file_path}]"
                print(f"Q includes file: {file_path}")

            submitted_answer = agent(question_text_with_context)
            submitted_answer_str = str(submitted_answer) if submitted_answer is not None else ""
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer_str})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer_str})
            print(f"--- Mock Task {task_id} Complete ---")
        except Exception as e:
            print(f"FATAL ERROR on mock task {task_id}: {e}")
            # 'traceback' is already imported at module top; the redundant
            # local import was removed.
            traceback.print_exc()
            submitted_answer = f"AGENT CRASH: {e}"
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})

    if not answers_payload:
        return "Agent produced no answers.", pd.DataFrame(results_log)

    status_update = f"Finished mock run. Processed {len(answers_payload)} answers for '{username}'."
    print(status_update)
    print("--- MOCK RUN - SUBMISSION SKIPPED ---")
    final_status = "--- Mock RUN COMPLETE ---\n" + status_update + "\nSubmission SKIPPED."
    results_df = pd.DataFrame(results_log)
    results_df['Correct'] = 'N/A (Mock)'
    return final_status, results_df
+
939
+
940
+ # --- Build Gradio Interface ---
941
+ with gr.Blocks() as demo:
942
+ gr.Markdown("# GAIA Agent - MOCK TEST (Groq Llama3.1)")
943
+ gr.Markdown("""
944
+ **Instructions:** Click 'Run Mock Evaluation'.
945
+ **Notes:** Uses Groq (Llama-3.3-70b Executor). Ensure `GROQ_API_KEY` secret/env var exists. **DOES NOT** fetch official Qs or submit. Check logs for details.
946
+ """)
947
+ gr.LoginButton()
948
+ run_button = gr.Button("Run Mock Evaluation")
949
+ status_output = gr.Textbox(label="Run Status / Mock Result", lines=5, interactive=False)
950
+ results_table = gr.DataFrame(label="Mock Qs, Agent Answers, Results", wrap=True)
951
+ run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
952
 
953
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # BUGFIX: space_host_startup previously read SPACE_ID, so the printed
    # runtime URL was wrong. SPACE_HOST and SPACE_ID are the standard
    # Hugging Face Spaces environment variables.
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")
    if space_host_startup:
        print(f"✅ SPACE_HOST: {space_host_startup}\n   Runtime URL: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ No SPACE_HOST (local?).")
    if space_id_startup:
        print(f"✅ SPACE_ID: {space_id_startup}\n   Repo URL: https://huggingface.co/spaces/{space_id_startup}\n   Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ No SPACE_ID (local?).")
    # __file__ is undefined in some embedded interpreters; fall back to CWD.
    try:
        script_dir = os.path.dirname(os.path.realpath(__file__))
    except NameError:
        script_dir = os.getcwd()
    print(f"Script directory: {script_dir}")
    print(f"CWD: {os.getcwd()}")
    try:
        print("Files in CWD:", os.listdir("."))
    except FileNotFoundError:
        print("Warning: CWD listing failed.")
    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface...")
    demo.queue().launch(debug=True, share=False)