gabejavitt committed on
Commit
40475ed
·
verified ·
1 Parent(s): ddd60f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +520 -66
app.py CHANGED
@@ -37,6 +37,526 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
37
  from langchain_community.tools import DuckDuckGoSearchRun
38
  from langchain_core.documents import Document
39
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  # =============================================================================
41
  # CONFIGURATION
42
  # =============================================================================
@@ -1179,72 +1699,6 @@ Turn 7: final_answer_tool("1.796 trillion")
1179
  print("✅ Planning & Reflection Agent graph compiled successfully.")
1180
 
1181
 
1182
    def __call__(self, question: str) -> str:
        """Run the compiled agent graph on one question and return the cleaned answer.

        Streams graph events, captures the answer from the first
        `final_answer_tool` call, strips conversational prefixes and code
        fences, and returns the result. On any graph error, returns an
        "AGENT GRAPH ERROR: ..." string instead of raising.
        """
        print(f"\n--- Starting Agent Run for Question ---")
        print(f"Agent received question (first 100 chars): {question[:100]}...")

        # Initial graph state: system prompt + user question, turn counter at 0.
        graph_input = {
            "messages": [
                SystemMessage(content=self.system_prompt),
                HumanMessage(content=question)
            ],
            "turn": 0
        }

        # Sentinel returned if the stream ends without a final_answer_tool call.
        final_answer = "AGENT FAILED TO PRODUCE ANSWER"
        try:
            # Small headroom over MAX_TURNS so the graph can finish cleanly.
            config = {"recursion_limit": MAX_TURNS + 5}
            for event in self.graph.stream(graph_input, stream_mode="values", config=config):

                if event.get('messages'):  # Ensure messages exist
                    last_message = event["messages"][-1]
                else:
                    continue  # Skip if no messages yet

                # Check for final answer extraction: only the FIRST tool call on
                # the message is inspected for final_answer_tool.
                if isinstance(last_message, AIMessage) and last_message.tool_calls:
                    if last_message.tool_calls[0].get("name") == "final_answer_tool":
                        final_answer_args = last_message.tool_calls[0].get('args', {})
                        if 'answer' in final_answer_args:
                            final_answer = final_answer_args['answer']
                            print(f"--- Final Answer Captured from tool call: '{final_answer}' ---")
                            break
                        else:
                            # Tool invoked but malformed — stop and report.
                            print(f"⚠️ Final Answer tool called without 'answer' argument: {final_answer_args}")
                            final_answer = "ERROR: FINAL_ANSWER_TOOL CALLED WITHOUT ANSWER"
                            break

                # Progress logging only; truncated to 500 chars per message.
                elif isinstance(last_message, ToolMessage):
                    print(f"Tool Result ({last_message.tool_call_id}): {last_message.content[:500]}...")
                elif isinstance(last_message, AIMessage) and not last_message.tool_calls:
                    print(f"AI Message (Reasoning): {last_message.content[:500]}...")
                elif isinstance(last_message, SystemMessage):
                    print(f"System Message: {last_message.content[:500]}...")

            # --- Final Answer Cleaning ---
            # Strip common conversational prefixes (first match only), but never
            # reduce the answer to an empty string.
            cleaned_answer = str(final_answer).strip()
            prefixes_to_remove = ["The answer is:", "Here is the answer:", "Based on the information:", "Final Answer:", "Answer:"]
            original_cleaned = cleaned_answer  # NOTE(review): kept but never read afterwards
            for prefix in prefixes_to_remove:
                if cleaned_answer.lower().startswith(prefix.lower()):
                    potential_answer = cleaned_answer[len(prefix):].strip()
                    if potential_answer:
                        cleaned_answer = potential_answer
                    break

            # Drop surrounding ``` fences, then a single pair of backticks.
            cleaned_answer = remove_fences_simple(cleaned_answer)
            if cleaned_answer.startswith("`") and cleaned_answer.endswith("`"):
                cleaned_answer = cleaned_answer[1:-1].strip()

            print(f"Agent returning final answer (cleaned): '{cleaned_answer}'")
            return cleaned_answer

        except Exception as e:
            # Swallow graph failures and surface them as a string result.
            print(f"Error running agent graph: {e}")
            tb_str = traceback.format_exc()
            print(tb_str)
            return f"AGENT GRAPH ERROR: {e}"
1248
 
1249
 
1250
  # =============================================================================
 
37
  from langchain_community.tools import DuckDuckGoSearchRun
38
  from langchain_core.documents import Document
39
 
40
+
41
+ # =============================================================================
42
+ # CONFIGURATION
43
+ # =============================================================================
44
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
45
+ MAX_TURNS = 25 # Increased for planning/reflection
46
+ MAX_MESSAGE_LENGTH = 8000
47
+ REFLECT_EVERY_N_TURNS = 5
48
+
49
+ # =============================================================================
50
+ # GLOBAL RAG COMPONENTS
51
+ # =============================================================================
52
# Lazily-built, module-wide RAG singletons; populated on first call below.
global_embeddings = None
global_text_splitter = None

def initialize_rag_components():
    """Initialize RAG components globally."""
    # Idempotent: each component is created only if still unset, so repeated
    # calls are cheap no-ops once initialization has succeeded.
    global global_embeddings, global_text_splitter

    if global_embeddings is None:
        print("Initializing RAG embeddings...")
        try:
            # CPU-only MiniLM sentence embedder — small enough for Spaces hardware.
            global_embeddings = HuggingFaceEmbeddings(
                model_name="sentence-transformers/all-MiniLM-L6-v2",
                model_kwargs={'device': 'cpu'},
            )
        except Exception as e:
            # Embeddings are mandatory for RAG; report failure to the caller.
            print(f"⚠️ Failed to initialize embeddings: {e}")
            return False
        print("✅ Embeddings initialized.")

    if global_text_splitter is None:
        print("Initializing text splitter...")
        # 1000-char chunks with 200-char overlap, splitting on progressively
        # finer separators.
        global_text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
            separators=["\n\n", "\n", ". ", " ", ""],
        )
        print("✅ Text splitter initialized.")

    return True
82
+
83
+ # =============================================================================
84
+ # ASR INITIALIZATION
85
+ # =============================================================================
86
# Load Whisper once at import time; downstream tools treat a None pipeline
# as "ASR unavailable" rather than failing hard.
asr_pipeline = None
try:
    print("Loading ASR (Whisper) pipeline globally...")
    gpu_available = torch.cuda.is_available()
    device = 0 if gpu_available else -1
    device_name = "cuda:0" if gpu_available else "cpu"
    print(f"Attempting to use device: {device_name} for ASR.")
    # fp16 only on GPU; CPU inference needs fp32.
    asr_pipeline = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-base",
        torch_dtype=torch.float16 if gpu_available else torch.float32,
        device=device,
    )
    print("✅ ASR (Whisper) pipeline loaded successfully.")
except Exception as e:
    # Best-effort: transcription tool will report unavailability at call time.
    print(f"⚠️ Warning: Could not load ASR pipeline globally. Error: {e}")
    asr_pipeline = None
102
+
103
+ # =============================================================================
104
+ # UTILITY FUNCTIONS
105
+ # =============================================================================
106
def remove_fences_simple(text):
    """Strip a surrounding triple-backtick fence (and a leading language tag) from *text*.

    If *text* is not wrapped in ``` fences it is returned completely
    unchanged (including any surrounding whitespace).
    """
    original_text = text
    stripped = text.strip()
    # Only touch input that is fenced on both ends.
    if not (stripped.startswith("```") and stripped.endswith("```")):
        return original_text
    inner = stripped[3:-3].strip()
    if '\n' in inner:
        head, _, tail = inner.partition('\n')
        tag = head.strip()
        # A short alphanumeric first line (e.g. "python", "json") is treated
        # as a language tag and dropped; underscores are tolerated.
        if tag.replace('_', '').isalnum() and len(tag) < 15:
            return tail.strip()
    return inner
118
+
119
def truncate_if_needed(content: str, max_length: int = MAX_MESSAGE_LENGTH) -> str:
    """Cap *content* at *max_length* chars, appending a truncation notice.

    Content at or under the limit is returned untouched; the notice reports
    the original total length.
    """
    total = len(content)
    if total <= max_length:
        return content
    return content[:max_length] + f"\n...[truncated, {total} total chars]"
124
+
125
def find_file(path: str) -> Optional[Path]:
    """Resolve *path* to an existing file, trying a few sensible variants.

    Candidates, in order: the POSIX-normalized path under the current
    working directory, the path as given, and the bare filename under the
    working directory. Returns the first candidate that exists, else None.
    """
    cwd = Path.cwd()
    posix_path = Path(path).as_posix()
    candidates = (
        cwd / posix_path,
        Path(posix_path),
        cwd / Path(path).name,
    )
    return next((candidate for candidate in candidates if candidate.exists()), None)
141
+
142
+ # =============================================================================
143
+ # PLANNING & REFLECTION TOOLS
144
+ # =============================================================================
145
+
146
class ThinkInput(BaseModel):
    # Pydantic arg schema for think_through_logic; the description is
    # surfaced to the LLM as part of the tool signature.
    reasoning: str = Field(description="Your step-by-step reasoning for a logic puzzle (keep under 200 chars)")

@tool(args_schema=ThinkInput)
def think_through_logic(reasoning: str) -> str:
    """
    Use this to work through logic puzzles, riddles, or reasoning problems.

    Call this when:
    - The question is a riddle or brain teaser
    - You need to reason through a logical problem
    - No external information is needed, just thinking

    After thinking through the logic, use calculator if math is involved,
    then validate_answer and final_answer_tool.

    NOTE: Keep reasoning summary brief (under 200 chars).
    """
    # NOTE(review): the docstring above is the runtime tool description that
    # @tool exposes to the model — do not edit it casually.
    # This tool performs no computation: it acts as a structured scratchpad,
    # echoing the model's reasoning back with next-step instructions.
    print(f"🧠 Thinking through logic: {reasoning[:100]}...")

    # The returned text becomes the tool observation the model sees next.
    return f"""✅ Logic reasoning recorded: {reasoning}

Now:
1. If there's any math to calculate, use calculator()
2. Once you have the answer, call validate_answer()
3. Then call final_answer_tool() with just the answer"""
172
+
173
+
174
class PlanInput(BaseModel):
    # Pydantic arg schema for create_plan; the description is surfaced to
    # the LLM as part of the tool signature.
    question: str = Field(description="Brief summary of the task (keep under 100 chars)")

@tool(args_schema=PlanInput)
def create_plan(question: str) -> str:
    """
    Creates a step-by-step plan for answering a question.
    CRITICAL: Call this FIRST for any multi-step or complex question.

    This helps you think through:
    1. What information do you need?
    2. In what order should you gather it?
    3. What tools will you use?

    After calling this, execute the plan step-by-step.

    NOTE: Keep the question summary brief (under 100 chars) to avoid errors.
    """
    # NOTE(review): the docstring above is the runtime tool description that
    # @tool exposes to the model — do not edit it casually.
    # No real planning happens here; the tool returns a fixed planning
    # framework prompt that nudges the model to structure its next steps.
    print(f"📋 Planning phase initiated for: {question[:100]}...")

    # The returned text becomes the tool observation the model sees next.
    return f"""✅ Plan Created. Now execute these steps methodically:

PLANNING FRAMEWORK:
1. GOAL: What exact answer format is needed?
2. REQUIREMENTS: What data/information is required?
3. STRATEGY: What's the most efficient path?
4. EXECUTION: List concrete actions in order

Now proceed with Step 1 of your plan."""
203
+
204
+
205
class ReflectInput(BaseModel):
    # Pydantic arg schema for reflect_on_progress; the description is
    # surfaced to the LLM as part of the tool signature.
    current_situation: str = Field(description="What you've tried so far (keep brief, under 100 chars)")

@tool(args_schema=ReflectInput)
def reflect_on_progress(current_situation: str) -> str:
    """
    Reflects on your progress and suggests what to do next.

    Call this when:
    - You feel stuck or uncertain
    - Tools keep failing
    - You're not making progress
    - You've taken 5+ steps without getting closer to the answer

    This helps you step back and reconsider your approach.

    NOTE: Keep the situation summary brief (under 100 chars).
    """
    # NOTE(review): the docstring above is the runtime tool description that
    # @tool exposes to the model — do not edit it casually.
    # Like the other meta-tools, this does no analysis itself: it interpolates
    # the model's own situation summary into a fixed self-review checklist.
    print(f"🤔 Reflection initiated: {current_situation[:100]}...")

    # The returned text becomes the tool observation the model sees next.
    return f"""🔍 REFLECTION ANALYSIS:

Current situation: {current_situation}

CRITICAL QUESTIONS TO ASK YOURSELF:
1. Have I gathered the information I actually need?
2. Am I using the right tools for this task?
3. Am I going in circles (repeating similar actions)?
4. Should I try a completely different approach?
5. Do I have enough information to answer now?

NEXT STEPS:
- If stuck: Try a different tool or search query
- If missing info: Identify exactly what's missing
- If have info: Proceed to final_answer_tool
- If uncertain: Break problem into smaller pieces

Take a different approach now."""
243
+
244
+
245
class ValidateInput(BaseModel):
    # Pydantic arg schema for validate_answer; descriptions are shown to the LLM.
    proposed_answer: str = Field(description="The answer you plan to submit")
    original_question: str = Field(description="The original question")

@tool(args_schema=ValidateInput)
def validate_answer(proposed_answer: str, original_question: str) -> str:
    """
    Validates your proposed answer before submission.
    CRITICAL: ALWAYS call this before final_answer_tool.

    Checks:
    - Does the answer match what was asked?
    - Is it in the correct format?
    - Are there any obvious issues?

    If validation passes, then call final_answer_tool.
    If validation fails, gather more information or correct the format.
    """
    # Heuristic lint of the candidate answer: "issues" block submission,
    # "warnings" only advise.
    print(f"✓ Validating answer: '{proposed_answer[:50]}...'")

    answer_lc = proposed_answer.lower()
    question_lc = original_question.lower()
    issues = []
    warnings = []

    # Hard failure: conversational preamble wrapped around the answer.
    fluff_phrases = ["the answer is", "based on", "according to", "i found that", "here is", "final answer"]
    if any(phrase in answer_lc for phrase in fluff_phrases):
        issues.append("❌ Remove conversational text. Provide ONLY the answer.")

    # Numeric-looking questions should produce at least one digit.
    number_keywords = ["how many", "what number", "count", "total", "sum"]
    if any(kw in question_lc for kw in number_keywords):
        if not any(char.isdigit() for char in proposed_answer):
            warnings.append("⚠️ Question seems to ask for a number, but answer contains no digits.")

    # "List" questions are expected to be comma-separated.
    if "list" in question_lc and "," not in proposed_answer:
        warnings.append("⚠️ Question asks for a list, consider comma-separated format.")

    # Yes/no-shaped questions get a short-answer nudge.
    yes_no_starts = ("is ", "are ", "was ", "were ", "do ", "does ", "did ", "can ", "will ")
    if question_lc.strip().startswith(yes_no_starts):
        if answer_lc not in ["yes", "no", "true", "false"]:
            warnings.append("⚠️ This looks like a yes/no question. Consider simple yes/no answer.")

    # Hard failure: markdown fences never belong in a submitted answer.
    if "```" in proposed_answer:
        issues.append("❌ Remove code fences (```) from the answer.")

    # Very long answers are usually explanations, not answers.
    if len(proposed_answer) > 500:
        warnings.append("⚠️ Answer is quite long. Are you sure this is just the answer and not an explanation?")

    if issues:
        return "🚫 VALIDATION FAILED:\n" + "\n".join(issues) + "\n\nFix these issues before calling final_answer_tool."

    if warnings:
        return "⚠️ VALIDATION WARNINGS:\n" + "\n".join(warnings) + "\n\nConsider these points, but you may proceed if confident."

    return "✅ VALIDATION PASSED: Answer looks good! Proceed with final_answer_tool now."
303
+
304
+
305
+ # =============================================================================
306
+ # CORE TOOLS
307
+ # =============================================================================
308
+
309
class SearchInput(BaseModel):
    # Pydantic arg schema for search_tool; the description is shown to the LLM.
    query: str = Field(description="The search query.")

@tool(args_schema=SearchInput)
def search_tool(query: str) -> str:
    """
    Searches the web using DuckDuckGo.
    Use for: recent information, facts, general web searches.

    Tips:
    - Keep queries concise and specific
    - Include year for time-sensitive queries (e.g., "GDP Brazil 2016")
    - Try different phrasings if first search doesn't help

    Returns the search results as text (truncated to MAX_MESSAGE_LENGTH),
    or an "Error..." string on invalid input or search failure.
    """
    if not isinstance(query, str) or not query.strip():
        return "Error: Invalid input. 'query' must be a non-empty string."

    print(f"🔍 Searching: {query}")
    try:
        search = DuckDuckGoSearchRun()
        result = search.run(query)
        # Consistency fix: use the shared truncate_if_needed helper instead of
        # duplicating its truncation logic inline (output is byte-identical).
        return truncate_if_needed(result)
    except Exception as e:
        # Network/rate-limit failures are reported as text, not raised.
        return f"Error running search for '{query}': {str(e)}"
335
+
336
+
337
class CalcInput(BaseModel):
    # Pydantic arg schema for calculator; the description is shown to the LLM.
    expression: str = Field(description="Mathematical expression to evaluate (e.g., '2 + 2', 'sqrt(16)', '45 * 1.2')")

@tool(args_schema=CalcInput)
def calculator(expression: str) -> str:
    """
    Evaluates mathematical expressions.
    Use this for ANY calculations instead of code_interpreter.

    Supports: +, -, *, /, **, sqrt, sin, cos, tan, log, exp, pi, e, abs, round

    Examples:
    - calculator("127 * 83")
    - calculator("sqrt(144)")
    - calculator("(45 + 23) / 2")
    """
    if not isinstance(expression, str) or not expression.strip():
        return "Error: Invalid expression."

    print(f"🧮 Calculating: {expression}")

    try:
        import math

        # Whitelist of names the expression may reference; everything else
        # (including builtins) is withheld from the eval environment.
        allowed_names = {
            'sqrt': math.sqrt, 'sin': math.sin, 'cos': math.cos, 'tan': math.tan,
            'log': math.log, 'log10': math.log10, 'exp': math.exp,
            'pi': math.pi, 'e': math.e, 'abs': abs, 'round': round,
            'pow': pow, 'sum': sum, 'min': min, 'max': max
        }

        # NOTE(security): eval() on model-supplied text; builtins are emptied
        # and only the whitelist above is exposed, but this is still eval.
        value = eval(expression, {"__builtins__": {}}, allowed_names)
        return f"{value}"
    except Exception as e:
        return f"Error evaluating '{expression}': {str(e)}\nMake sure to use proper syntax (e.g., sqrt(16), not sqrt 16)"
372
+
373
+
374
class CodeInput(BaseModel):
    # Pydantic arg schema for code_interpreter; the description is shown to the LLM.
    code: str = Field(description="Python code to execute. MUST include print() for output.")

@tool(args_schema=CodeInput)
def code_interpreter(code: str) -> str:
    """
    Executes Python code for complex data processing.

    WHEN TO USE:
    - Data analysis (CSV, Excel files)
    - Complex calculations with loops/conditionals
    - String manipulation
    - Date/time calculations

    WHEN NOT TO USE:
    - Simple math (use calculator instead)
    - Web searches (use search_tool)

    Available libraries: pandas as pd, numpy as np, json, re, datetime

    CRITICAL: Always use print() to output results!
    """
    if not isinstance(code, str):
        return "Error: Invalid input. 'code' must be a string."

    # Substring-based denylist — a coarse screen, checked case-insensitively.
    lowered = code.lower()
    blocked_patterns = ['__import__', 'eval(', 'compile(', 'subprocess', 'os.system', 'exec(']
    matched = next((p for p in blocked_patterns if p in lowered), None)
    if matched is not None:
        return f"Error: Potentially dangerous operation '{matched}' is not allowed."

    # Reject code that opens files in any write/append mode.
    write_modes = ["'w'", '"w"', "'a'", '"a"', "'wb'", '"wb"']
    if 'open(' in lowered and any(mode in code for mode in write_modes):
        return "Error: Writing files is not allowed in code_interpreter. Use write_file tool instead."

    print(f"💻 Executing code...")
    captured_out = io.StringIO()
    captured_err = io.StringIO()

    try:
        with contextlib.redirect_stdout(captured_out), contextlib.redirect_stderr(captured_err):
            # NOTE(security): exec() of model-supplied code. Full __builtins__
            # are passed through, so the string denylist above is the only
            # real barrier — this is a weak sandbox by design, flagged here.
            exec_env = {
                "pd": pd,
                "np": np,
                "json": json,
                "re": re,
                "__builtins__": __builtins__
            }
            exec(code, exec_env, {})

        stdout = captured_out.getvalue()
        stderr = captured_err.getvalue()

        if stderr:
            return f"Error in execution:\n{stderr}\n\nStdout (if any):\n{stdout}"

        if stdout:
            # Inline truncation kept byte-identical to the original output.
            if len(stdout) > MAX_MESSAGE_LENGTH:
                stdout = stdout[:MAX_MESSAGE_LENGTH] + f"\n...[truncated, {len(stdout)} total chars]"
            return f"{stdout}"

        return "Code executed but produced no output. Remember to use print() to display results!"

    except Exception:
        tb_str = traceback.format_exc()
        return f"Execution failed:\n{tb_str}"
440
+
441
+
442
class ReadFileInput(BaseModel):
    # Pydantic arg schema for read_file; the description is shown to the LLM.
    path: str = Field(description="Path to the file to read")

@tool(args_schema=ReadFileInput)
def read_file(path: str) -> str:
    """Reads a file from the filesystem."""
    # Returns the file text (length-capped via truncate_if_needed) or an
    # "Error..."/diagnostic string; never raises to the caller.
    if not isinstance(path, str) or not path.strip():
        return "Error: Invalid input. 'path' must be a non-empty string."

    print(f"📄 Reading file: {path}")

    resolved = find_file(path)
    if not resolved:
        # Help the model recover by listing what actually exists here.
        cwd_files = os.listdir(".")
        return (f"Error: File not found: '{path}'\n"
                f"Files in current directory: {cwd_files}")

    try:
        return truncate_if_needed(resolved.read_text(encoding='utf-8'))
    except UnicodeDecodeError:
        # Not valid UTF-8 text — describe the binary file instead of dumping it.
        size = resolved.stat().st_size
        ext = resolved.suffix
        return (f"File appears to be binary ({size} bytes). Cannot display as text.\n"
                f"File type: {ext}\n"
                f"Consider using audio_transcription_tool for audio files.")
    except Exception as e:
        return f"Error reading file: {str(e)}"
470
+
471
+
472
class WriteFileInput(BaseModel):
    # Pydantic arg schema for write_file; descriptions are shown to the LLM.
    path: str = Field(description="Path where file should be written")
    content: str = Field(description="Content to write to the file")

@tool(args_schema=WriteFileInput)
def write_file(path: str, content: str) -> str:
    """Writes content to a file."""
    # Resolves *path* under the current working directory, creating parent
    # directories as needed; returns a status string, never raises.
    if not isinstance(path, str) or not path.strip():
        return "Error: Invalid input. 'path' must be a non-empty string."
    if not isinstance(content, str):
        return "Error: Invalid input. 'content' must be a string."

    print(f"✍️ Writing file: {path}")

    try:
        target = Path.cwd() / path
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content, encoding='utf-8')
    except Exception as e:
        return f"Error writing file '{path}': {str(e)}"
    return f"Successfully wrote {len(content)} characters to '{path}'."
493
+
494
+
495
class ListDirInput(BaseModel):
    # Pydantic arg schema for list_directory; the description is shown to the LLM.
    path: str = Field(description="Directory path to list", default=".")

@tool(args_schema=ListDirInput)
def list_directory(path: str = ".") -> str:
    """Lists files and directories in a path."""
    # Produces a human-readable listing (directories first, then files with
    # sizes) or an "Error..." string; never raises.
    print(f"📁 Listing directory: {path}")

    try:
        target = Path.cwd() if path == "." else Path.cwd() / path

        if not target.is_dir():
            return f"Error: '{path}' is not a valid directory."

        entries = sorted(target.iterdir())
        if not entries:
            return f"Directory '{path}' is empty."

        # Partition into annotated directory and file labels, preserving
        # the sorted order within each group.
        directories = [f"📁 {entry.name}/" for entry in entries if entry.is_dir()]
        files = [
            f"📄 {entry.name} ({entry.stat().st_size} bytes)"
            for entry in entries if not entry.is_dir()
        ]

        listing = f"Contents of '{path}':\n\n"
        if directories:
            listing += "Directories:\n" + "\n".join(directories) + "\n\n"
        if files:
            listing += "Files:\n" + "\n".join(files)

        return listing
    except Exception as e:
        return f"Error listing directory '{path}': {str(e)}"
532
+
533
+
534
+ class AudioInput(BaseModel):
535
+ file_path: str = Field(description="Path to audio file to transcribe")
536
+
537
+ @tool(args_schema=AudioInput)
538
+ def audio_transcription_tool(file_path: str) -> str:
539
+ """Transcribes audio files to text using Whisper."""
540
+ if not isinstance(file_path, str) or not file_path.strip():
541
+ return "Error: Invalid input. 'file_path' must be a non-empty string."
542
+
543
+ print(f"🎤 Transcribing audio: {file_path}")
544
+
545
+ if asr_pipeline is None:
546
+ return "Error: ASR pipeline is not available."
547
+
548
+ audio_path = find_file(file_path)
549
+ if not audio_path:
550
+ return f"Error: Audio file not found: '{file_path}'"
551
+
552
+ try:
553
+ transcription = asr_pipeline(str(audio_path))
554
+ result_text = transcription.get("text", "")
555
+
556
+ if not result_text:
557
+ return "Error: Transcription produced no text."
558
+
559
+ return f"Tra
560
  # =============================================================================
561
  # CONFIGURATION
562
  # =============================================================================
 
1699
  print("✅ Planning & Reflection Agent graph compiled successfully.")
1700
 
1701
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1702
 
1703
 
1704
  # =============================================================================