bstraehle committed on
Commit
b2e9a38
·
verified ·
1 Parent(s): ab53135

Update agents/tools/ai_tools.py

Browse files
Files changed (1) hide show
  1. agents/tools/ai_tools.py +63 -48
agents/tools/ai_tools.py CHANGED
@@ -71,14 +71,16 @@ class AITools():
71
  raise RuntimeError("Media file processing failed")
72
  time.sleep(1)
73
 
 
 
 
 
 
 
74
  response = client.models.generate_content(
75
  model=current_model,
76
  contents=[file, question],
77
- config=types.GenerateContentConfig(
78
- thinking_config=types.ThinkingConfig(
79
- thinking_level=THINKING_LEVEL_MEDIA_ANALYSIS
80
- )
81
- )
82
  )
83
 
84
  result = response.text
@@ -124,15 +126,16 @@ class AITools():
124
 
125
  for attempt in range(2):
126
  try:
 
 
 
 
 
 
127
  response = client.models.generate_content(
128
  model=model,
129
  contents=question,
130
- config=types.GenerateContentConfig(
131
- tools=[types.Tool(google_search=types.GoogleSearch())],
132
- thinking_config=types.ThinkingConfig(
133
- thinking_level=THINKING_LEVEL_WEB_SEARCH
134
- )
135
- )
136
  )
137
 
138
  result = response.text
@@ -269,17 +272,19 @@ class AITools():
269
 
270
  for attempt in range(2):
271
  try:
 
 
 
 
 
 
272
  result = client.models.generate_content(
273
  model=model,
274
  contents=types.Content(
275
  parts=[types.Part(file_data=types.FileData(file_uri=url)),
276
  types.Part(text=question)]
277
  ),
278
- config=types.GenerateContentConfig(
279
- thinking_config=types.ThinkingConfig(
280
- thinking_level=THINKING_LEVEL_YOUTUBE_ANALYSIS
281
- )
282
- )
283
  )
284
 
285
  print(f"🛠️ AITools: youtube_analysis_tool: model={model}")
@@ -331,14 +336,16 @@ class AITools():
331
  file = client.files.upload(file=file_path)
332
  contents = [file, question]
333
 
 
 
 
 
 
 
334
  response = client.models.generate_content(
335
  model=model,
336
  contents=contents,
337
- config=types.GenerateContentConfig(
338
- thinking_config=types.ThinkingConfig(
339
- thinking_level=THINKING_LEVEL_DOCUMENT_ANALYSIS
340
- )
341
- )
342
  )
343
 
344
  result = response.text
@@ -377,15 +384,16 @@ class AITools():
377
 
378
  for attempt in range(2):
379
  try:
 
 
 
 
 
 
380
  response = client.models.generate_content(
381
  model=model,
382
  contents=[f"{question}\n{json_data}"],
383
- config=types.GenerateContentConfig(
384
- tools=[types.Tool(code_execution=types.ToolCodeExecution)],
385
- thinking_config=types.ThinkingConfig(
386
- thinking_level=THINKING_LEVEL_CODE_GENERATION
387
- )
388
- ),
389
  )
390
 
391
  result = AITools._extract_execution_result(response)
@@ -427,15 +435,16 @@ class AITools():
427
  try:
428
  file = client.files.upload(file=file_path)
429
 
 
 
 
 
 
 
430
  response = client.models.generate_content(
431
  model=model,
432
  contents=[file, question],
433
- config=types.GenerateContentConfig(
434
- tools=[types.Tool(code_execution=types.ToolCodeExecution)],
435
- thinking_config=types.ThinkingConfig(
436
- thinking_level=THINKING_LEVEL_CODE_EXECUTION
437
- )
438
- ),
439
  )
440
 
441
  result = AITools._extract_execution_result(response)
@@ -494,14 +503,16 @@ class AITools():
494
  ]
495
  )
496
 
 
 
 
 
 
 
497
  response = client.models.generate_content(
498
  model=model,
499
  contents=[content],
500
- config=types.GenerateContentConfig(
501
- thinking_config=types.ThinkingConfig(
502
- thinking_level=THINKING_LEVEL_IMAGE_TO_FEN
503
- )
504
- )
505
  )
506
 
507
  result = None
@@ -567,14 +578,16 @@ class AITools():
567
  ]
568
  )
569
 
 
 
 
 
 
 
570
  response = client.models.generate_content(
571
  model=model,
572
  contents=[content],
573
- config=types.GenerateContentConfig(
574
- thinking_config=types.ThinkingConfig(
575
- thinking_level=THINKING_LEVEL_ALGEBRAIC_NOTATION
576
- )
577
- )
578
  )
579
 
580
  result = None
@@ -620,14 +633,16 @@ class AITools():
620
  try:
621
  prompt = PROMPT_FINAL_ANSWER.format(question=question, answer=answer)
622
 
 
 
 
 
 
 
623
  response = client.models.generate_content(
624
  model=model,
625
  contents=[prompt],
626
- config=types.GenerateContentConfig(
627
- thinking_config=types.ThinkingConfig(
628
- thinking_level=THINKING_LEVEL_FINAL_ANSWER
629
- )
630
- )
631
  )
632
 
633
  result = response.text.strip()
 
71
  raise RuntimeError("Media file processing failed")
72
  time.sleep(1)
73
 
74
+ config_params = {}
75
+ if current_model != LLM_FALLBACK:
76
+ config_params["thinking_config"] = types.ThinkingConfig(
77
+ thinking_level=THINKING_LEVEL_MEDIA_ANALYSIS
78
+ )
79
+
80
  response = client.models.generate_content(
81
  model=current_model,
82
  contents=[file, question],
83
+ config=types.GenerateContentConfig(**config_params)
 
 
 
 
84
  )
85
 
86
  result = response.text
 
126
 
127
  for attempt in range(2):
128
  try:
129
+ config_params = {"tools": [types.Tool(google_search=types.GoogleSearch())]}
130
+ if model != LLM_FALLBACK:
131
+ config_params["thinking_config"] = types.ThinkingConfig(
132
+ thinking_level=THINKING_LEVEL_WEB_SEARCH
133
+ )
134
+
135
  response = client.models.generate_content(
136
  model=model,
137
  contents=question,
138
+ config=types.GenerateContentConfig(**config_params)
 
 
 
 
 
139
  )
140
 
141
  result = response.text
 
272
 
273
  for attempt in range(2):
274
  try:
275
+ config_params = {}
276
+ if model != LLM_FALLBACK:
277
+ config_params["thinking_config"] = types.ThinkingConfig(
278
+ thinking_level=THINKING_LEVEL_YOUTUBE_ANALYSIS
279
+ )
280
+
281
  result = client.models.generate_content(
282
  model=model,
283
  contents=types.Content(
284
  parts=[types.Part(file_data=types.FileData(file_uri=url)),
285
  types.Part(text=question)]
286
  ),
287
+ config=types.GenerateContentConfig(**config_params)
 
 
 
 
288
  )
289
 
290
  print(f"🛠️ AITools: youtube_analysis_tool: model={model}")
 
336
  file = client.files.upload(file=file_path)
337
  contents = [file, question]
338
 
339
+ config_params = {}
340
+ if model != LLM_FALLBACK:
341
+ config_params["thinking_config"] = types.ThinkingConfig(
342
+ thinking_level=THINKING_LEVEL_DOCUMENT_ANALYSIS
343
+ )
344
+
345
  response = client.models.generate_content(
346
  model=model,
347
  contents=contents,
348
+ config=types.GenerateContentConfig(**config_params)
 
 
 
 
349
  )
350
 
351
  result = response.text
 
384
 
385
  for attempt in range(2):
386
  try:
387
+ config_params = {"tools": [types.Tool(code_execution=types.ToolCodeExecution)]}
388
+ if model != LLM_FALLBACK:
389
+ config_params["thinking_config"] = types.ThinkingConfig(
390
+ thinking_level=THINKING_LEVEL_CODE_GENERATION
391
+ )
392
+
393
  response = client.models.generate_content(
394
  model=model,
395
  contents=[f"{question}\n{json_data}"],
396
+ config=types.GenerateContentConfig(**config_params),
 
 
 
 
 
397
  )
398
 
399
  result = AITools._extract_execution_result(response)
 
435
  try:
436
  file = client.files.upload(file=file_path)
437
 
438
+ config_params = {"tools": [types.Tool(code_execution=types.ToolCodeExecution)]}
439
+ if model != LLM_FALLBACK:
440
+ config_params["thinking_config"] = types.ThinkingConfig(
441
+ thinking_level=THINKING_LEVEL_CODE_EXECUTION
442
+ )
443
+
444
  response = client.models.generate_content(
445
  model=model,
446
  contents=[file, question],
447
+ config=types.GenerateContentConfig(**config_params),
 
 
 
 
 
448
  )
449
 
450
  result = AITools._extract_execution_result(response)
 
503
  ]
504
  )
505
 
506
+ config_params = {}
507
+ if model != LLM_FALLBACK:
508
+ config_params["thinking_config"] = types.ThinkingConfig(
509
+ thinking_level=THINKING_LEVEL_IMAGE_TO_FEN
510
+ )
511
+
512
  response = client.models.generate_content(
513
  model=model,
514
  contents=[content],
515
+ config=types.GenerateContentConfig(**config_params)
 
 
 
 
516
  )
517
 
518
  result = None
 
578
  ]
579
  )
580
 
581
+ config_params = {}
582
+ if model != LLM_FALLBACK:
583
+ config_params["thinking_config"] = types.ThinkingConfig(
584
+ thinking_level=THINKING_LEVEL_ALGEBRAIC_NOTATION
585
+ )
586
+
587
  response = client.models.generate_content(
588
  model=model,
589
  contents=[content],
590
+ config=types.GenerateContentConfig(**config_params)
 
 
 
 
591
  )
592
 
593
  result = None
 
633
  try:
634
  prompt = PROMPT_FINAL_ANSWER.format(question=question, answer=answer)
635
 
636
+ config_params = {}
637
+ if model != LLM_FALLBACK:
638
+ config_params["thinking_config"] = types.ThinkingConfig(
639
+ thinking_level=THINKING_LEVEL_FINAL_ANSWER
640
+ )
641
+
642
  response = client.models.generate_content(
643
  model=model,
644
  contents=[prompt],
645
+ config=types.GenerateContentConfig(**config_params)
 
 
 
 
646
  )
647
 
648
  result = response.text.strip()