bstraehle committed on
Commit
a8c8fe8
Β·
verified Β·
1 Parent(s): 6b0f7b4

Upload 3 files

Browse files
agents/tools/ai_tools.py CHANGED
@@ -0,0 +1,535 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # References:
2
+
3
+ # https://docs.crewai.com/introduction
4
+ # https://ai.google.dev/gemini-api/docs
5
+
6
+ import base64, chess, os
7
+ from agents.models.llms import (
8
+ LLM_WEB_SEARCH,
9
+ LLM_WEB_BROWSER,
10
+ LLM_IMAGE_ANALYSIS,
11
+ LLM_AUDIO_ANALYSIS,
12
+ LLM_VIDEO_ANALYSIS,
13
+ LLM_YOUTUBE_ANALYSIS,
14
+ LLM_DOCUMENT_ANALYSIS,
15
+ LLM_CODE_GENERATION,
16
+ LLM_CODE_EXECUTION,
17
+ LLM_IMAGE_TO_FEN,
18
+ LLM_ALGEBRAIC_NOTATION,
19
+ LLM_FINAL_ANSWER,
20
+
21
+ THINKING_LEVEL_WEB_SEARCH,
22
+ THINKING_LEVEL_MEDIA_ANALYSIS,
23
+ THINKING_LEVEL_YOUTUBE_ANALYSIS,
24
+ THINKING_LEVEL_DOCUMENT_ANALYSIS,
25
+ THINKING_LEVEL_CODE_GENERATION,
26
+ THINKING_LEVEL_CODE_EXECUTION,
27
+ THINKING_LEVEL_IMAGE_TO_FEN,
28
+ THINKING_LEVEL_ALGEBRAIC_NOTATION,
29
+ THINKING_LEVEL_FINAL_ANSWER
30
+ )
31
+ from agents.models.prompts import (
32
+ PROMPT_IMG_TO_FEN,
33
+ PROMPT_ALGEBRAIC_NOTATION,
34
+ PROMPT_FINAL_ANSWER
35
+ )
36
+ from crewai.tools import tool
37
+ from crewai_tools import StagehandTool
38
+ from google import genai
39
+ from google.genai import types
40
+ from utils.utils import (
41
+ read_docx_text,
42
+ read_pptx_text,
43
+ is_ext
44
+ )
45
+
46
class AITools():
    """CrewAI tools backed by the Gemini API (and Stagehand for web browsing)."""

    @staticmethod
    def _get_client():
        """Build a Gemini client from the GEMINI_API_KEY environment variable."""
        return genai.Client(api_key=os.environ["GEMINI_API_KEY"])

    @staticmethod
    def _media_analysis_tool(tool_name: str, model: str, question: str, file_path: str) -> str:
        """Upload a media file and ask *model* to answer *question* about it.

        Shared implementation for the image/audio/video analysis tools.

        Args:
            tool_name (str): Name of the calling tool (used for logging only)
            model (str): Gemini model identifier
            question (str): Question to answer
            file_path (str): Path of the media file to upload

        Returns:
            str: Answer to the question

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: {tool_name}: question={question}, file_path={file_path}")

        try:
            client = AITools._get_client()

            file = client.files.upload(file=file_path)

            response = client.models.generate_content(
                model=model,
                contents=[file, question],
                config=types.GenerateContentConfig(
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_MEDIA_ANALYSIS
                    )
                )
            )

            result = response.text.strip()
            print(f"πŸ› οΈ AITools: {tool_name}: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: {tool_name}: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @staticmethod
    def _extract_execution_result(response):
        """Return the stripped stdout of the first code-execution part, or None."""
        for part in response.candidates[0].content.parts:
            if part.code_execution_result is not None:
                return part.code_execution_result.output.strip()

        return None

    @tool("Web Search Tool")
    def web_search_tool(question: str) -> str:
        """Given a question only, search the web to answer the question.

        Args:
            question (str): Question to answer

        Returns:
            str: Answer to the question

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: web_search_tool: question={question}")

        try:
            client = AITools._get_client()

            response = client.models.generate_content(
                model=LLM_WEB_SEARCH,
                contents=question,
                config=types.GenerateContentConfig(
                    tools=[types.Tool(google_search=types.GoogleSearch())],
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_WEB_SEARCH
                    )
                )
            )

            result = response.text.strip()
            print(f"πŸ› οΈ AITools: web_search_tool: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: web_search_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @tool("Web Browser Tool")
    def web_browser_tool(question: str, url: str) -> str:
        """Given a question and URL, load the URL and act, extract, or observe to answer the question.

        Args:
            question (str): Question about a URL
            url (str): The target URL (must be http/https). "http://"/"https://" will be auto-added if missing.

        Returns:
            str: Answer to the question

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: web_browser_tool: question={question}, url={url}")

        try:
            url_str = url.strip()

            # Stagehand requires a fully-qualified URL; default to https.
            if not url_str.lower().startswith(("http://", "https://")):
                url_str = f"https://{url_str}"

            with StagehandTool(
                api_key=os.environ["BROWSERBASE_API_KEY"],
                project_id=os.environ["BROWSERBASE_PROJECT_ID"],
                model_api_key=os.environ["BROWSERBASE_MODEL_API_KEY"],
                model_name=LLM_WEB_BROWSER,
                dom_settle_timeout_ms=5000,
                headless=True,
                self_heal=True,
                wait_for_captcha_solves=True,
                verbose=3
            ) as stagehand_tool:
                result = stagehand_tool.run(
                    instruction=question,
                    url=url_str,
                    command_type="act"  # TODO: act, extract, observe
                ).strip()

                print(f"πŸ› οΈ AITools: web_browser_tool: result={result}")
                return result
        except Exception as e:
            print(f"⚠️ AITools: web_browser_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @tool("Image Analysis Tool")
    def image_analysis_tool(question: str, file_path: str) -> str:
        """Given a question and image file, analyze the image to answer the question.

        Args:
            question (str): Question about an image file
            file_path (str): The image file path

        Returns:
            str: Answer to the question about the image file

        Raises:
            RuntimeError: If processing fails
        """
        return AITools._media_analysis_tool("image_analysis_tool", LLM_IMAGE_ANALYSIS, question, file_path)

    @tool("Audio Analysis Tool")
    def audio_analysis_tool(question: str, file_path: str) -> str:
        """Given a question and audio file, analyze the audio to answer the question.

        Args:
            question (str): Question about an audio file
            file_path (str): The audio file path

        Returns:
            str: Answer to the question about the audio file

        Raises:
            RuntimeError: If processing fails
        """
        return AITools._media_analysis_tool("audio_analysis_tool", LLM_AUDIO_ANALYSIS, question, file_path)

    @tool("Video Analysis Tool")
    def video_analysis_tool(question: str, file_path: str) -> str:
        """Given a question and video file, analyze the video to answer the question.

        Args:
            question (str): Question about a video file
            file_path (str): The video file path

        Returns:
            str: Answer to the question about the video file

        Raises:
            RuntimeError: If processing fails
        """
        return AITools._media_analysis_tool("video_analysis_tool", LLM_VIDEO_ANALYSIS, question, file_path)

    @tool("YouTube Analysis Tool")
    def youtube_analysis_tool(question: str, url: str) -> str:
        """Given a question and YouTube URL, analyze the video to answer the question.

        Args:
            question (str): Question about a YouTube video
            url (str): The YouTube URL

        Returns:
            str: Answer to the question about the YouTube video

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: youtube_analysis_tool: question={question}, url={url}")

        try:
            client = AITools._get_client()

            response = client.models.generate_content(
                model=LLM_YOUTUBE_ANALYSIS,
                contents=types.Content(
                    parts=[types.Part(file_data=types.FileData(file_uri=url)),
                           types.Part(text=question)]
                ),
                config=types.GenerateContentConfig(
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_YOUTUBE_ANALYSIS
                    )
                )
            )

            # BUG FIX: strip the response text, not the response object itself.
            result = response.text.strip()

            print(f"πŸ› οΈ AITools: youtube_analysis_tool: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: youtube_analysis_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @tool("Document Analysis Tool")
    def document_analysis_tool(question: str, file_path: str) -> str:
        """Given a question and document file, analyze the document to answer the question.

        .docx/.pptx files are read locally and inlined as text; any other
        format is uploaded to the Gemini Files API.

        Args:
            question (str): Question about a document file
            file_path (str): The document file path

        Returns:
            str: Answer to the question about the document file

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: document_analysis_tool: question={question}, file_path={file_path}")

        try:
            client = AITools._get_client()

            contents = []

            if is_ext(file_path, ".docx"):
                text_data = read_docx_text(file_path)
                contents = [f"{question}\n{text_data}"]
                print(f"πŸ› οΈ Text data:\n{text_data}")
            elif is_ext(file_path, ".pptx"):
                text_data = read_pptx_text(file_path)
                contents = [f"{question}\n{text_data}"]
                print(f"πŸ› οΈ Text data:\n{text_data}")
            else:
                file = client.files.upload(file=file_path)
                contents = [file, question]

            response = client.models.generate_content(
                model=LLM_DOCUMENT_ANALYSIS,
                contents=contents,
                config=types.GenerateContentConfig(
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_DOCUMENT_ANALYSIS
                    )
                )
            )

            result = response.text.strip()
            print(f"πŸ› οΈ AITools: document_analysis_tool: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: document_analysis_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @tool("Code Generation and Execution Tool")
    def code_generation_and_execution_tool(question: str, json_data: str) -> str:
        """Given a question and JSON data, generate and execute code to answer the question.

        Args:
            question (str): Question to answer
            json_data (str): The JSON data

        Returns:
            str: Answer to the question

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: code_generation_and_execution_tool: question={question}, json_data={json_data}")

        try:
            client = AITools._get_client()

            response = client.models.generate_content(
                model=LLM_CODE_GENERATION,
                contents=[f"{question}\n{json_data}"],
                config=types.GenerateContentConfig(
                    tools=[types.Tool(code_execution=types.ToolCodeExecution)],
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_CODE_GENERATION
                    )
                ),
            )

            result = AITools._extract_execution_result(response)

            print(f"πŸ› οΈ AITools: code_generation_and_execution_tool: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: code_generation_and_execution_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @tool("Code Execution Tool")
    def code_execution_tool(question: str, file_path: str) -> str:
        """Given a question and Python file, execute the file to answer the question.

        Args:
            question (str): Question to answer
            file_path (str): The Python file path

        Returns:
            str: Answer to the question

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: code_execution_tool: question={question}, file_path={file_path}")

        try:
            client = AITools._get_client()

            file = client.files.upload(file=file_path)

            response = client.models.generate_content(
                model=LLM_CODE_EXECUTION,
                contents=[file, question],
                config=types.GenerateContentConfig(
                    tools=[types.Tool(code_execution=types.ToolCodeExecution)],
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_CODE_EXECUTION
                    )
                ),
            )

            result = AITools._extract_execution_result(response)

            print(f"πŸ› οΈ AITools: code_execution_tool: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: code_execution_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @tool("Image to FEN Tool")
    def img_to_fen_tool(question: str, file_path: str, active_color: str) -> str:
        """Given a chess question, image file, and active color, return the FEN.

        Args:
            question (str): The chess question
            file_path (str): The image file path
            active_color (str): The active color

        Returns:
            str: FEN of the chess position

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: img_to_fen_tool: question={question}, file_path={file_path}, active_color={active_color}")

        try:
            client = AITools._get_client()

            # Pass the raw bytes directly; the previous base64 encode/decode
            # round trip was a no-op.
            with open(file_path, "rb") as f:
                img_bytes = f.read()

            prompt = PROMPT_IMG_TO_FEN.format(question=question, active_color=active_color)

            content = types.Content(
                parts=[
                    types.Part(text=prompt),
                    types.Part(
                        inline_data=types.Blob(
                            mime_type="image/png",  # NOTE(review): assumes PNG input — confirm
                            data=img_bytes,
                        )
                    )
                ]
            )

            response = client.models.generate_content(
                model=LLM_IMAGE_TO_FEN,
                contents=[content],
                config=types.GenerateContentConfig(
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_IMAGE_TO_FEN
                    )
                )
            )

            result = None

            for part in response.parts:
                if part.text is not None:
                    result = part.text.strip()
                    break

            board = chess.Board(result)  # FEN validation; raises on invalid FEN

            print(f"πŸ› οΈ AITools: img_to_fen_tool: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: img_to_fen_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @tool("Algebraic Notation Tool")
    def algebraic_notation_tool(question: str, file_path: str, position_evaluation: str) -> str:
        """Given a chess question, image file, and position evaluation in UCI notation, answer the question in algebraic notation.

        Args:
            question (str): The chess question
            file_path (str): The image file path
            position_evaluation (str): The position evaluation in UCI notation

        Returns:
            str: Answer to the question in algebraic notation

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: algebraic_notation_tool: question={question}, file_path={file_path}, position_evaluation={position_evaluation}")

        try:
            client = AITools._get_client()

            # Pass the raw bytes directly; the previous base64 encode/decode
            # round trip was a no-op.
            with open(file_path, "rb") as f:
                img_bytes = f.read()

            prompt = PROMPT_ALGEBRAIC_NOTATION.format(question=question, position_evaluation=position_evaluation)

            content = types.Content(
                parts=[
                    types.Part(text=prompt),
                    types.Part(
                        inline_data=types.Blob(
                            mime_type="image/png",  # NOTE(review): assumes PNG input — confirm
                            data=img_bytes,
                        )
                    )
                ]
            )

            response = client.models.generate_content(
                model=LLM_ALGEBRAIC_NOTATION,
                contents=[content],
                config=types.GenerateContentConfig(
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_ALGEBRAIC_NOTATION
                    )
                )
            )

            result = None

            for part in response.parts:
                if part.text is not None:
                    result = part.text.strip()
                    break

            print(f"πŸ› οΈ AITools: algebraic_notation_tool: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: algebraic_notation_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")

    @staticmethod
    def final_answer_tool(question: str, answer: str) -> str:
        """Given a question and initial answer, get the final answer.

        Args:
            question (str): The question
            answer (str): The initial answer

        Returns:
            str: Final answer

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ AITools: final_answer_tool: question={question}, answer={answer}")

        try:
            client = AITools._get_client()

            prompt = PROMPT_FINAL_ANSWER.format(question=question, answer=answer)

            response = client.models.generate_content(
                model=LLM_FINAL_ANSWER,
                contents=[prompt],
                config=types.GenerateContentConfig(
                    thinking_config=types.ThinkingConfig(
                        thinking_level=THINKING_LEVEL_FINAL_ANSWER
                    )
                )
            )

            result = response.text.strip()
            print(f"πŸ› οΈ AITools: final_answer_tool: result={result}")
            return result
        except Exception as e:
            print(f"⚠️ AITools: final_answer_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")
agents/tools/deterministic_tools.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # References:
2
+
3
+ # https://docs.crewai.com/introduction
4
+
5
+ from crewai.tools import tool
6
+
7
class DeterministicTools():
    """Deterministic (non-LLM) CrewAI tools."""

    # Arithmetic

    @tool("Add Tool")
    def add_tool(a: float, b: float) -> float:
        """Add two numbers.

        Args:
            a (float): First number
            b (float): Second number

        Returns:
            number: Result
        """
        print(f"πŸ€– DeterministicTools: add_tool: a={a}, b={b}")

        result = a + b
        print(f"πŸ€– DeterministicTools: add_tool: result={result}")
        return result

    @tool("Subtract Tool")
    def subtract_tool(a: float, b: float) -> float:
        """Subtract two numbers.

        Args:
            a (float): First number
            b (float): Second number

        Returns:
            number: Result
        """
        print(f"πŸ€– DeterministicTools: subtract_tool: a={a}, b={b}")

        result = a - b
        print(f"πŸ€– DeterministicTools: subtract_tool: result={result}")
        return result

    @tool("Multiply Tool")
    def multiply_tool(a: float, b: float) -> float:
        """Multiply two numbers.

        Args:
            a (float): First number
            b (float): Second number

        Returns:
            number: Result
        """
        print(f"πŸ€– DeterministicTools: multiply_tool: a={a}, b={b}")

        result = a * b
        print(f"πŸ€– DeterministicTools: multiply_tool: result={result}")
        return result

    @tool("Divide Tool")
    def divide_tool(a: float, b: float) -> float:
        """Divide two numbers.

        Args:
            a (float): First number
            b (float): Second number

        Returns:
            number: Result

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ€– DeterministicTools: divide_tool: a={a}, b={b}")

        if b == 0:
            raise RuntimeError("Cannot divide by zero.")

        result = a / b
        print(f"πŸ€– DeterministicTools: divide_tool: result={result}")
        return result

    @tool("Modulus Tool")
    def modulus_tool(a: float, b: float) -> float:
        """Get the modulus of two numbers.

        Args:
            a (float): First number
            b (float): Second number

        Returns:
            number: Result

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ€– DeterministicTools: modulus_tool: a={a}, b={b}")

        # Consistency with divide_tool: surface a RuntimeError instead of
        # letting a bare ZeroDivisionError escape.
        if b == 0:
            raise RuntimeError("Cannot divide by zero.")

        result = a % b
        print(f"πŸ€– DeterministicTools: modulus_tool: result={result}")
        return result
agents/tools/mcp_tools.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # References:
2
+
3
+ # https://docs.crewai.com/introduction
4
+
5
import ast
import json
import os

from agents.mcp.mcp_client import (
    call_mcp_tool,
    MCP_SSE_URL_CHESS_POSITION_EVALUATION,
    MCP_TOOL_CHESS_POSITION_EVALUATION
)
from crewai.tools import tool
12
+
13
class MCPTools():
    """CrewAI tools that delegate to MCP servers."""

    # Chess position evaluation

    @tool("Best Move Tool")
    def best_move_tool(fen: str) -> str:
        """Get best move with continuation in UCI notation for chess position in FEN.

        Args:
            fen (str): Chess position in FEN

        Returns:
            str: Best move with continuation in UCI notation (e.g., 'f7f2 e4e5 f2f1')

        Raises:
            RuntimeError: If processing fails
        """
        print(f"πŸ› οΈ MCPTools: best_move_tool: fen={fen}")

        try:
            print(MCP_SSE_URL_CHESS_POSITION_EVALUATION)

            # Environment override wins over the hard-coded default URL.
            mcp_url = os.getenv("MCP_SSE_URL", MCP_SSE_URL_CHESS_POSITION_EVALUATION)

            raw_result = call_mcp_tool(
                mcp_url=mcp_url,
                tool_name=MCP_TOOL_CHESS_POSITION_EVALUATION,
                arguments={"fen": fen}
            )

            # The MCP layer may return a stringified Python literal. Parse it
            # safely: ast.literal_eval never executes code, unlike eval(),
            # which would be an injection risk on a remote service's output.
            if isinstance(raw_result, str):
                try:
                    raw_result = ast.literal_eval(raw_result)
                except (ValueError, SyntaxError):
                    pass  # not a literal — keep the raw string as-is

            # Some transports wrap the payload in a 1-tuple.
            if isinstance(raw_result, tuple) and len(raw_result) > 0:
                raw_result = raw_result[0]

            # Prefer the 'continuation' field when the payload is structured.
            if isinstance(raw_result, dict) and "continuation" in raw_result:
                result = raw_result["continuation"]
            else:
                result = raw_result

            print(f"πŸ› οΈ MCPTools: best_move_tool: result={result}")

            return result
        except Exception as e:
            print(f"⚠️ MCPTools: best_move_tool: exception={str(e)}")
            raise RuntimeError(f"Processing failed: {str(e)}")