bstraehle committed
Commit 5e37331 · verified · 1 Parent(s): d38fd6f

Update agents/tools/ai_tools.py
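In outline, the change wraps each Gemini call in AITools in a two-attempt loop: a new _is_rate_limit_error helper detects 429 RESOURCE_EXHAUSTED failures, the first such failure triggers one retry on LLM_FALLBACK, and any other exception is still logged and re-raised as a RuntimeError. A minimal sketch of the pattern, assuming LLM_FALLBACK is exported by agents/models/llms.py (the commit adds it to the import list); _call_with_fallback below is an illustrative helper name, not part of the commit, which inlines the loop in every tool:

    # Sketch only: the commit repeats this loop inline in each AITools method.
    from agents.models.llms import LLM_FALLBACK  # assumed export, added to the imports in this commit

    def _is_rate_limit_error(exception):
        """Check if the exception is a rate limit error (429 RESOURCE_EXHAUSTED)."""
        error_str = str(exception)
        return "429" in error_str and "RESOURCE_EXHAUSTED" in error_str

    def _call_with_fallback(generate, primary_model):  # hypothetical helper, not in the commit
        model = primary_model
        for attempt in range(2):
            try:
                return generate(model)  # a single client.models.generate_content call
            except Exception as e:
                if attempt == 0 and _is_rate_limit_error(e):
                    model = LLM_FALLBACK  # retry once on the fallback model
                    continue
                raise RuntimeError(f"Processing failed: {str(e)}")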

Files changed (1)
  1. agents/tools/ai_tools.py +310 -250
agents/tools/ai_tools.py CHANGED
@@ -17,6 +17,7 @@ from agents.models.llms import (
     LLM_IMAGE_TO_FEN,
     LLM_ALGEBRAIC_NOTATION,
     LLM_FINAL_ANSWER,
 
     THINKING_LEVEL_WEB_SEARCH,
     THINKING_LEVEL_MEDIA_ANALYSIS,
@@ -47,43 +48,54 @@ class AITools():
     def _get_client():
         return genai.Client(api_key=os.environ["GEMINI_API_KEY"])
 
     def _media_analysis_tool(tool_name: str, model: str, question: str, file_path: str) -> str:
         print("")
         print(f"🛠️ AITools: {tool_name}: question={question}, file_path={file_path}")
 
-        try:
-            client = AITools._get_client()
-
-            file = client.files.upload(file=file_path)
-
-            while True:
-                media_file = client.files.get(name=file.name)
-                if media_file.state == "ACTIVE":
-                    break
-                elif media_file.state == "FAILED":
-                    raise RuntimeError("Media file processing failed")
-                time.sleep(1)
-
-            response = client.models.generate_content(
-                model=model,
-                contents=[file, question],
-                config=types.GenerateContentConfig(
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_MEDIA_ANALYSIS
                     )
                 )
-            )
 
-            result = response.text
-
-            print(f"🛠️ AITools: {tool_name}: model={model}")
-            print(f"🛠️ AITools: {tool_name}: thinking_level={THINKING_LEVEL_MEDIA_ANALYSIS}")
-            print(f"🛠️ AITools: {tool_name}: result={result}")
-
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: {tool_name}: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     def _extract_execution_result(response):
         for part in response.candidates[0].content.parts:
@@ -108,30 +120,36 @@ class AITools():
         print("")
         print(f"🛠️ AITools: web_search_tool: question={question}")
 
-        try:
-            client = AITools._get_client()
-
-            response = client.models.generate_content(
-                model=LLM_WEB_SEARCH,
-                contents=question,
-                config=types.GenerateContentConfig(
-                    tools=[types.Tool(google_search=types.GoogleSearch())],
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_WEB_SEARCH
                     )
                 )
-            )
 
-            result = response.text
-
-            print(f"🛠️ AITools: web_search_tool: model={LLM_WEB_SEARCH}")
-            print(f"🛠️ AITools: web_search_tool: thinking_level={THINKING_LEVEL_WEB_SEARCH}")
-            print(f"🛠️ AITools: web_search_tool: result={result}")
-
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: web_search_tool: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Web Browser Tool")
     def web_browser_tool(question: str, url: str) -> str:
@@ -247,30 +265,36 @@ class AITools():
         print("")
         print(f"🛠️ AITools: youtube_analysis_tool: question={question}, url={url}")
 
-        try:
-            client = AITools._get_client()
-
-            result = client.models.generate_content(
-                model=LLM_YOUTUBE_ANALYSIS,
-                contents=types.Content(
-                    parts=[types.Part(file_data=types.FileData(file_uri=url)),
-                           types.Part(text=question)]
-                ),
-                config=types.GenerateContentConfig(
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_YOUTUBE_ANALYSIS
                     )
                 )
-            )
 
-            print(f"🛠️ AITools: youtube_analysis_tool: model={LLM_YOUTUBE_ANALYSIS}")
-            print(f"🛠️ AITools: youtube_analysis_tool: thinking_level={THINKING_LEVEL_YOUTUBE_ANALYSIS}")
-            print(f"🛠️ AITools: youtube_analysis_tool: result={result}")
 
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: youtube_analysis_tool: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Document Analysis Tool")
     def document_analysis_tool(question: str, file_path: str) -> str:
@@ -289,43 +313,49 @@ class AITools():
         print("")
         print(f"🛠️ AITools: document_analysis_tool: question={question}, file_path={file_path}")
 
-        try:
-            client = AITools._get_client()
-
-            contents = []
-
-            if is_ext(file_path, ".docx"):
-                text_data = read_docx_text(file_path)
-                contents = [f"{question}\n{text_data}"]
-                print(f"🛠️ Text data:\n{text_data}")
-            elif is_ext(file_path, ".pptx"):
-                text_data = read_pptx_text(file_path)
-                contents = [f"{question}\n{text_data}"]
-                print(f"🛠️ Text data:\n{text_data}")
-            else:
-                file = client.files.upload(file=file_path)
-                contents = [file, question]
-
-            response = client.models.generate_content(
-                model=LLM_DOCUMENT_ANALYSIS,
-                contents=contents,
-                config=types.GenerateContentConfig(
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_DOCUMENT_ANALYSIS
                     )
                 )
-            )
-
-            result = response.text
-
-            print(f"🛠️ AITools: document_analysis_tool: model={LLM_DOCUMENT_ANALYSIS}")
-            print(f"🛠️ AITools: document_analysis_tool: thinking_level={THINKING_LEVEL_DOCUMENT_ANALYSIS}")
-            print(f"🛠️ AITools: document_analysis_tool: result={result}")
-
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: document_analysis_tool: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Code Generation and Execution Tool")
     def code_generation_and_execution_tool(question: str, json_data: str) -> str:
@@ -343,30 +373,36 @@ class AITools():
         print("")
         print(f"🛠️ AITools: code_generation_and_execution_tool: question={question}, json_data={json_data}")
 
-        try:
-            client = AITools._get_client()
-
-            response = client.models.generate_content(
-                model=LLM_CODE_GENERATION,
-                contents=[f"{question}\n{json_data}"],
-                config=types.GenerateContentConfig(
-                    tools=[types.Tool(code_execution=types.ToolCodeExecution)],
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_CODE_GENERATION
-                    )
-                ),
-            )
 
-            result = AITools._extract_execution_result(response)
 
-            print(f"🛠️ AITools: code_generation_and_execution_tool: model={LLM_CODE_GENERATION}")
-            print(f"🛠️ AITools: code_generation_and_execution_tool: thinking_level={THINKING_LEVEL_CODE_GENERATION}")
-            print(f"🛠️ AITools: code_generation_and_execution_tool: result={result}")
 
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: code_generation_and_execution_tool: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Code Execution Tool")
     def code_execution_tool(question: str, file_path: str) -> str:
@@ -385,32 +421,38 @@ class AITools():
         print("")
         print(f"🛠️ AITools: code_execution_tool: question={question}, file_path={file_path}")
 
-        try:
-            client = AITools._get_client()
-
-            file = client.files.upload(file=file_path)
-
-            response = client.models.generate_content(
-                model=LLM_CODE_EXECUTION,
-                contents=[file, question],
-                config=types.GenerateContentConfig(
-                    tools=[types.Tool(code_execution=types.ToolCodeExecution)],
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_CODE_EXECUTION
-                    )
-                ),
-            )
 
-            result = AITools._extract_execution_result(response)
 
-            print(f"🛠️ AITools: code_execution_tool: model={LLM_CODE_EXECUTION}")
-            print(f"🛠️ AITools: code_execution_tool: thinking_level={THINKING_LEVEL_CODE_EXECUTION}")
-            print(f"🛠️ AITools: code_execution_tool: result={result}")
 
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: code_execution_tool: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Image to FEN Tool")
     def img_to_fen_tool(question: str, file_path: str, active_color: str) -> str:
@@ -430,54 +472,60 @@ class AITools():
         print("")
         print(f"🛠️ AITools: img_to_fen_tool: question={question}, file_path={file_path}, active_color={active_color}")
 
-        try:
-            client = AITools._get_client()
 
-            with open(file_path, "rb") as f:
-                img_bytes = f.read()
-                img_b64 = base64.b64encode(img_bytes).decode("ascii")
-
-            prompt = PROMPT_IMG_TO_FEN.format(question=question, active_color=active_color)
-
-            content = types.Content(
-                parts=[
-                    types.Part(text=prompt),
-                    types.Part(
-                        inline_data=types.Blob(
-                            mime_type="image/png",
-                            data=base64.b64decode(img_b64),
                         )
                     )
-                ]
-            )
-
-            response = client.models.generate_content(
-                model=LLM_IMAGE_TO_FEN,
-                contents=[content],
-                config=types.GenerateContentConfig(
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_IMAGE_TO_FEN
-                    )
                 )
-            )
 
-            result = None
 
-            for part in response.parts:
-                if part.text is not None:
-                    result = part.text
-                    break
 
-            board = chess.Board(result) # FEN validation
 
-            print(f"🛠️ AITools: img_to_fen_tool: model={LLM_IMAGE_TO_FEN}")
-            print(f"🛠️ AITools: img_to_fen_tool: thinking_level={THINKING_LEVEL_IMAGE_TO_FEN}")
-            print(f"🛠️ AITools: img_to_fen_tool: result={result}")
 
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: img_to_fen_tool: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Algebraic Notation Tool")
     def algebraic_notation_tool(question: str, file_path: str, position_evaluation: str) -> str:
@@ -497,52 +545,58 @@ class AITools():
         print("")
         print(f"🛠️ AITools: algebraic_notation_tool: question={question}, file_path={file_path}, position_evaluation={position_evaluation}")
 
-        try:
-            client = AITools._get_client()
-
-            with open(file_path, "rb") as f:
-                img_bytes = f.read()
-                img_b64 = base64.b64encode(img_bytes).decode("ascii")
-
-            prompt = PROMPT_ALGEBRAIC_NOTATION.format(question=question, position_evaluation=position_evaluation)
-
-            content = types.Content(
-                parts=[
-                    types.Part(text=prompt),
-                    types.Part(
-                        inline_data=types.Blob(
-                            mime_type="image/png",
-                            data=base64.b64decode(img_b64),
                         )
-                    )
-                ]
-            )
-
-            response = client.models.generate_content(
-                model=LLM_ALGEBRAIC_NOTATION,
-                contents=[content],
-                config=types.GenerateContentConfig(
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_ALGEBRAIC_NOTATION
                     )
                 )
-            )
 
-            result = None
-
-            for part in response.parts:
-                if part.text is not None:
-                    result = part.text
-                    break
 
-            print(f"🛠️ AITools: algebraic_notation_tool: model={LLM_ALGEBRAIC_NOTATION}")
-            print(f"🛠️ AITools: algebraic_notation_tool: thinking_level={THINKING_LEVEL_ALGEBRAIC_NOTATION}")
-            print(f"🛠️ AITools: algebraic_notation_tool: result={result}")
 
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: algebraic_notation_tool: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     def final_answer_tool(question: str, answer: str) -> str:
         """Given a question and initial answer, get the final answer.
@@ -560,28 +614,34 @@ class AITools():
         print("")
         print(f"🛠️ AITools: final_answer_tool: question={question}, answer={answer}")
 
-        try:
-            client = AITools._get_client()
 
-            prompt = PROMPT_FINAL_ANSWER.format(question=question, answer=answer)
-
-            response = client.models.generate_content(
-                model=LLM_FINAL_ANSWER,
-                contents=[prompt],
-                config=types.GenerateContentConfig(
-                    thinking_config=types.ThinkingConfig(
-                        thinking_level=THINKING_LEVEL_FINAL_ANSWER
                     )
                 )
-            )
-
-            result = response.text.strip()
 
-            print(f"🛠️ AITools: final_answer_tool: model={LLM_FINAL_ANSWER}")
-            print(f"🛠️ AITools: final_answer_tool: thinking_level={THINKING_LEVEL_FINAL_ANSWER}")
-            print(f"🛠️ AITools: final_answer_tool: result={result}")
-
-            return result
-        except Exception as e:
-            print(f"⚠️ AITools: final_answer_tool: exception={str(e)}")
-            raise RuntimeError(f"Processing failed: {str(e)}")
 
     LLM_IMAGE_TO_FEN,
     LLM_ALGEBRAIC_NOTATION,
     LLM_FINAL_ANSWER,
+    LLM_FALLBACK,
 
     THINKING_LEVEL_WEB_SEARCH,
     THINKING_LEVEL_MEDIA_ANALYSIS,
 
     def _get_client():
         return genai.Client(api_key=os.environ["GEMINI_API_KEY"])
 
+    def _is_rate_limit_error(exception):
+        """Check if the exception is a rate limit error (429 RESOURCE_EXHAUSTED)."""
+        error_str = str(exception)
+        return "429" in error_str and "RESOURCE_EXHAUSTED" in error_str
+
     def _media_analysis_tool(tool_name: str, model: str, question: str, file_path: str) -> str:
         print("")
         print(f"🛠️ AITools: {tool_name}: question={question}, file_path={file_path}")
 
+        client = AITools._get_client()
+        current_model = model
+
+        for attempt in range(2):
+            try:
+                file = client.files.upload(file=file_path)
+
+                while True:
+                    media_file = client.files.get(name=file.name)
+                    if media_file.state == "ACTIVE":
+                        break
+                    elif media_file.state == "FAILED":
+                        raise RuntimeError("Media file processing failed")
+                    time.sleep(1)
+
+                response = client.models.generate_content(
+                    model=current_model,
+                    contents=[file, question],
+                    config=types.GenerateContentConfig(
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_MEDIA_ANALYSIS
+                        )
                     )
                 )
 
+                result = response.text
+
+                print(f"🛠️ AITools: {tool_name}: model={current_model}")
+                print(f"🛠️ AITools: {tool_name}: thinking_level={THINKING_LEVEL_MEDIA_ANALYSIS}")
+                print(f"🛠️ AITools: {tool_name}: result={result}")
+
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: {tool_name}: Rate limit hit with {current_model}, falling back to {LLM_FALLBACK}")
+                    current_model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: {tool_name}: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
 
     def _extract_execution_result(response):
         for part in response.candidates[0].content.parts:
 
         print("")
         print(f"🛠️ AITools: web_search_tool: question={question}")
 
+        client = AITools._get_client()
+        model = LLM_WEB_SEARCH
+
+        for attempt in range(2):
+            try:
+                response = client.models.generate_content(
+                    model=model,
+                    contents=question,
+                    config=types.GenerateContentConfig(
+                        tools=[types.Tool(google_search=types.GoogleSearch())],
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_WEB_SEARCH
+                        )
                     )
                 )
 
+                result = response.text
+
+                print(f"🛠️ AITools: web_search_tool: model={model}")
+                print(f"🛠️ AITools: web_search_tool: thinking_level={THINKING_LEVEL_WEB_SEARCH}")
+                print(f"🛠️ AITools: web_search_tool: result={result}")
+
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: web_search_tool: Rate limit hit with {model}, falling back to {LLM_FALLBACK}")
+                    model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: web_search_tool: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Web Browser Tool")
     def web_browser_tool(question: str, url: str) -> str:
 
         print("")
         print(f"🛠️ AITools: youtube_analysis_tool: question={question}, url={url}")
 
+        client = AITools._get_client()
+        model = LLM_YOUTUBE_ANALYSIS
+
+        for attempt in range(2):
+            try:
+                result = client.models.generate_content(
+                    model=model,
+                    contents=types.Content(
+                        parts=[types.Part(file_data=types.FileData(file_uri=url)),
+                               types.Part(text=question)]
+                    ),
+                    config=types.GenerateContentConfig(
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_YOUTUBE_ANALYSIS
+                        )
                     )
                 )
 
+                print(f"🛠️ AITools: youtube_analysis_tool: model={model}")
+                print(f"🛠️ AITools: youtube_analysis_tool: thinking_level={THINKING_LEVEL_YOUTUBE_ANALYSIS}")
+                print(f"🛠️ AITools: youtube_analysis_tool: result={result}")
 
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: youtube_analysis_tool: Rate limit hit with {model}, falling back to {LLM_FALLBACK}")
+                    model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: youtube_analysis_tool: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Document Analysis Tool")
     def document_analysis_tool(question: str, file_path: str) -> str:
 
         print("")
         print(f"🛠️ AITools: document_analysis_tool: question={question}, file_path={file_path}")
 
+        client = AITools._get_client()
+        model = LLM_DOCUMENT_ANALYSIS
+
+        for attempt in range(2):
+            try:
+                contents = []
+
+                if is_ext(file_path, ".docx"):
+                    text_data = read_docx_text(file_path)
+                    contents = [f"{question}\n{text_data}"]
+                    print(f"🛠️ Text data:\n{text_data}")
+                elif is_ext(file_path, ".pptx"):
+                    text_data = read_pptx_text(file_path)
+                    contents = [f"{question}\n{text_data}"]
+                    print(f"🛠️ Text data:\n{text_data}")
+                else:
+                    file = client.files.upload(file=file_path)
+                    contents = [file, question]
+
+                response = client.models.generate_content(
+                    model=model,
+                    contents=contents,
+                    config=types.GenerateContentConfig(
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_DOCUMENT_ANALYSIS
+                        )
                     )
                 )
+
+                result = response.text
+
+                print(f"🛠️ AITools: document_analysis_tool: model={model}")
+                print(f"🛠️ AITools: document_analysis_tool: thinking_level={THINKING_LEVEL_DOCUMENT_ANALYSIS}")
+                print(f"🛠️ AITools: document_analysis_tool: result={result}")
+
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: document_analysis_tool: Rate limit hit with {model}, falling back to {LLM_FALLBACK}")
+                    model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: document_analysis_tool: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Code Generation and Execution Tool")
     def code_generation_and_execution_tool(question: str, json_data: str) -> str:
 
         print("")
         print(f"🛠️ AITools: code_generation_and_execution_tool: question={question}, json_data={json_data}")
 
+        client = AITools._get_client()
+        model = LLM_CODE_GENERATION
+
+        for attempt in range(2):
+            try:
+                response = client.models.generate_content(
+                    model=model,
+                    contents=[f"{question}\n{json_data}"],
+                    config=types.GenerateContentConfig(
+                        tools=[types.Tool(code_execution=types.ToolCodeExecution)],
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_CODE_GENERATION
+                        )
+                    ),
+                )
 
+                result = AITools._extract_execution_result(response)
 
+                print(f"🛠️ AITools: code_generation_and_execution_tool: model={model}")
+                print(f"🛠️ AITools: code_generation_and_execution_tool: thinking_level={THINKING_LEVEL_CODE_GENERATION}")
+                print(f"🛠️ AITools: code_generation_and_execution_tool: result={result}")
 
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: code_generation_and_execution_tool: Rate limit hit with {model}, falling back to {LLM_FALLBACK}")
+                    model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: code_generation_and_execution_tool: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Code Execution Tool")
     def code_execution_tool(question: str, file_path: str) -> str:
 
         print("")
         print(f"🛠️ AITools: code_execution_tool: question={question}, file_path={file_path}")
 
+        client = AITools._get_client()
+        model = LLM_CODE_EXECUTION
+
+        for attempt in range(2):
+            try:
+                file = client.files.upload(file=file_path)
+
+                response = client.models.generate_content(
+                    model=model,
+                    contents=[file, question],
+                    config=types.GenerateContentConfig(
+                        tools=[types.Tool(code_execution=types.ToolCodeExecution)],
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_CODE_EXECUTION
+                        )
+                    ),
+                )
 
+                result = AITools._extract_execution_result(response)
 
+                print(f"🛠️ AITools: code_execution_tool: model={model}")
+                print(f"🛠️ AITools: code_execution_tool: thinking_level={THINKING_LEVEL_CODE_EXECUTION}")
+                print(f"🛠️ AITools: code_execution_tool: result={result}")
 
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: code_execution_tool: Rate limit hit with {model}, falling back to {LLM_FALLBACK}")
+                    model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: code_execution_tool: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Image to FEN Tool")
     def img_to_fen_tool(question: str, file_path: str, active_color: str) -> str:
 
         print("")
         print(f"🛠️ AITools: img_to_fen_tool: question={question}, file_path={file_path}, active_color={active_color}")
 
+        client = AITools._get_client()
+        model = LLM_IMAGE_TO_FEN
+
+        for attempt in range(2):
+            try:
+                with open(file_path, "rb") as f:
+                    img_bytes = f.read()
+                    img_b64 = base64.b64encode(img_bytes).decode("ascii")
+
+                prompt = PROMPT_IMG_TO_FEN.format(question=question, active_color=active_color)
+
+                content = types.Content(
+                    parts=[
+                        types.Part(text=prompt),
+                        types.Part(
+                            inline_data=types.Blob(
+                                mime_type="image/png",
+                                data=base64.b64decode(img_b64),
+                            )
+                        )
+                    ]
+                )
 
+                response = client.models.generate_content(
+                    model=model,
+                    contents=[content],
+                    config=types.GenerateContentConfig(
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_IMAGE_TO_FEN
                         )
                     )
                 )
 
+                result = None
 
+                for part in response.parts:
+                    if part.text is not None:
+                        result = part.text
+                        break
 
+                board = chess.Board(result) # FEN validation
 
+                print(f"🛠️ AITools: img_to_fen_tool: model={model}")
+                print(f"🛠️ AITools: img_to_fen_tool: thinking_level={THINKING_LEVEL_IMAGE_TO_FEN}")
+                print(f"🛠️ AITools: img_to_fen_tool: result={result}")
 
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: img_to_fen_tool: Rate limit hit with {model}, falling back to {LLM_FALLBACK}")
+                    model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: img_to_fen_tool: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
 
     @tool("Algebraic Notation Tool")
     def algebraic_notation_tool(question: str, file_path: str, position_evaluation: str) -> str:
 
         print("")
         print(f"🛠️ AITools: algebraic_notation_tool: question={question}, file_path={file_path}, position_evaluation={position_evaluation}")
 
+        client = AITools._get_client()
+        model = LLM_ALGEBRAIC_NOTATION
+
+        for attempt in range(2):
+            try:
+                with open(file_path, "rb") as f:
+                    img_bytes = f.read()
+                    img_b64 = base64.b64encode(img_bytes).decode("ascii")
+
+                prompt = PROMPT_ALGEBRAIC_NOTATION.format(question=question, position_evaluation=position_evaluation)
+
+                content = types.Content(
+                    parts=[
+                        types.Part(text=prompt),
+                        types.Part(
+                            inline_data=types.Blob(
+                                mime_type="image/png",
+                                data=base64.b64decode(img_b64),
+                            )
+                        )
+                    ]
+                )
+
+                response = client.models.generate_content(
+                    model=model,
+                    contents=[content],
+                    config=types.GenerateContentConfig(
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_ALGEBRAIC_NOTATION
                         )
                     )
                 )
 
+                result = None
+
+                for part in response.parts:
+                    if part.text is not None:
+                        result = part.text
+                        break
 
+                print(f"🛠️ AITools: algebraic_notation_tool: model={model}")
+                print(f"🛠️ AITools: algebraic_notation_tool: thinking_level={THINKING_LEVEL_ALGEBRAIC_NOTATION}")
+                print(f"🛠️ AITools: algebraic_notation_tool: result={result}")
 
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: algebraic_notation_tool: Rate limit hit with {model}, falling back to {LLM_FALLBACK}")
+                    model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: algebraic_notation_tool: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
 
     def final_answer_tool(question: str, answer: str) -> str:
         """Given a question and initial answer, get the final answer.
 
         print("")
         print(f"🛠️ AITools: final_answer_tool: question={question}, answer={answer}")
 
+        client = AITools._get_client()
+        model = LLM_FINAL_ANSWER
 
+        for attempt in range(2):
+            try:
+                prompt = PROMPT_FINAL_ANSWER.format(question=question, answer=answer)
+
+                response = client.models.generate_content(
+                    model=model,
+                    contents=[prompt],
+                    config=types.GenerateContentConfig(
+                        thinking_config=types.ThinkingConfig(
+                            thinking_level=THINKING_LEVEL_FINAL_ANSWER
+                        )
                     )
                 )
+
+                result = response.text.strip()
 
+                print(f"🛠️ AITools: final_answer_tool: model={model}")
+                print(f"🛠️ AITools: final_answer_tool: thinking_level={THINKING_LEVEL_FINAL_ANSWER}")
+                print(f"🛠️ AITools: final_answer_tool: result={result}")
+
+                return result
+            except Exception as e:
+                if attempt == 0 and AITools._is_rate_limit_error(e):
+                    print(f"⚠️ AITools: final_answer_tool: Rate limit hit with {model}, falling back to {LLM_FALLBACK}")
+                    model = LLM_FALLBACK
+                    continue
+                print(f"⚠️ AITools: final_answer_tool: exception={str(e)}")
+                raise RuntimeError(f"Processing failed: {str(e)}")
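
The new helper matches on the exception text, so any error whose string contains both "429" and "RESOURCE_EXHAUSTED" triggers the one-time fallback, while every other failure still surfaces as RuntimeError("Processing failed: ..."). An illustrative check (not part of the commit):

    assert AITools._is_rate_limit_error(Exception("429 RESOURCE_EXHAUSTED: quota exceeded"))
    assert not AITools._is_rate_limit_error(Exception("400 INVALID_ARGUMENT"))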