bstraehle committed on
Commit
2fbdec9
·
verified ·
1 Parent(s): a35fb44

Update agents/tools/ai_tools.py

Browse files
Files changed (1) hide show
  1. agents/tools/ai_tools.py +43 -9
agents/tools/ai_tools.py CHANGED
@@ -48,10 +48,13 @@ class AITools():
48
  model=LLM_WEB_SEARCH,
49
  contents=question,
50
  config=types.GenerateContentConfig(
51
- tools=[types.Tool(google_search=types.GoogleSearch())]
 
 
 
52
  )
53
  )
54
-
55
  return response.text.strip()
56
  except Exception as e:
57
  raise RuntimeError(f"Processing failed: {str(e)}")
@@ -133,7 +136,7 @@ class AITools():
133
  config=types.GenerateContentConfig(
134
  thinking_config=types.ThinkingConfig(
135
  thinking_level=THINKING_LEVEL
136
- ),
137
  )
138
  )
139
 
@@ -173,7 +176,12 @@ class AITools():
173
 
174
  response = client.models.generate_content(
175
  model=LLM_IMAGE_ANALYSIS,
176
- contents=[file, question]
 
 
 
 
 
177
  )
178
 
179
  return response.text.strip()
@@ -201,7 +209,12 @@ class AITools():
201
 
202
  response = client.models.generate_content(
203
  model=LLM_AUDIO_ANALYSIS,
204
- contents=[file, question]
 
 
 
 
 
205
  )
206
 
207
  return response.text.strip()
@@ -229,7 +242,12 @@ class AITools():
229
 
230
  response = client.models.generate_content(
231
  model=LLM_VIDEO_ANALYSIS,
232
- contents=[file, question]
 
 
 
 
 
233
  )
234
 
235
  return response.text.strip()
@@ -258,6 +276,11 @@ class AITools():
258
  contents=types.Content(
259
  parts=[types.Part(file_data=types.FileData(file_uri=url)),
260
  types.Part(text=question)]
 
 
 
 
 
261
  )
262
  )
263
  except Exception as e:
@@ -296,7 +319,12 @@ class AITools():
296
 
297
  response = client.models.generate_content(
298
  model=LLM_DOCUMENT_ANALYSIS,
299
- contents=contents
 
 
 
 
 
300
  )
301
 
302
  return response.text.strip()
@@ -323,7 +351,10 @@ class AITools():
323
  model=LLM_CODE_GENERATION,
324
  contents=[f"{question}\n{json_data}"],
325
  config=types.GenerateContentConfig(
326
- tools=[types.Tool(code_execution=types.ToolCodeExecution)]
 
 
 
327
  ),
328
  )
329
 
@@ -356,7 +387,10 @@ class AITools():
356
  model=LLM_CODE_EXECUTION,
357
  contents=[file, question],
358
  config=types.GenerateContentConfig(
359
- tools=[types.Tool(code_execution=types.ToolCodeExecution)]
 
 
 
360
  ),
361
  )
362
 
 
48
  model=LLM_WEB_SEARCH,
49
  contents=question,
50
  config=types.GenerateContentConfig(
51
+ tools=[types.Tool(google_search=types.GoogleSearch())],
52
+ thinking_config=types.ThinkingConfig(
53
+ thinking_level=THINKING_LEVEL
54
+ )
55
  )
56
  )
57
+
58
  return response.text.strip()
59
  except Exception as e:
60
  raise RuntimeError(f"Processing failed: {str(e)}")
 
136
  config=types.GenerateContentConfig(
137
  thinking_config=types.ThinkingConfig(
138
  thinking_level=THINKING_LEVEL
139
+ )
140
  )
141
  )
142
 
 
176
 
177
  response = client.models.generate_content(
178
  model=LLM_IMAGE_ANALYSIS,
179
+ contents=[file, question],
180
+ config=types.GenerateContentConfig(
181
+ thinking_config=types.ThinkingConfig(
182
+ thinking_level=THINKING_LEVEL
183
+ )
184
+ )
185
  )
186
 
187
  return response.text.strip()
 
209
 
210
  response = client.models.generate_content(
211
  model=LLM_AUDIO_ANALYSIS,
212
+ contents=[file, question],
213
+ config=types.GenerateContentConfig(
214
+ thinking_config=types.ThinkingConfig(
215
+ thinking_level=THINKING_LEVEL
216
+ )
217
+ )
218
  )
219
 
220
  return response.text.strip()
 
242
 
243
  response = client.models.generate_content(
244
  model=LLM_VIDEO_ANALYSIS,
245
+ contents=[file, question],
246
+ config=types.GenerateContentConfig(
247
+ thinking_config=types.ThinkingConfig(
248
+ thinking_level=THINKING_LEVEL
249
+ )
250
+ )
251
  )
252
 
253
  return response.text.strip()
 
276
  contents=types.Content(
277
  parts=[types.Part(file_data=types.FileData(file_uri=url)),
278
  types.Part(text=question)]
279
+ ),
280
+ config=types.GenerateContentConfig(
281
+ thinking_config=types.ThinkingConfig(
282
+ thinking_level=THINKING_LEVEL
283
+ )
284
  )
285
  )
286
  except Exception as e:
 
319
 
320
  response = client.models.generate_content(
321
  model=LLM_DOCUMENT_ANALYSIS,
322
+ contents=contents,
323
+ config=types.GenerateContentConfig(
324
+ thinking_config=types.ThinkingConfig(
325
+ thinking_level=THINKING_LEVEL
326
+ )
327
+ )
328
  )
329
 
330
  return response.text.strip()
 
351
  model=LLM_CODE_GENERATION,
352
  contents=[f"{question}\n{json_data}"],
353
  config=types.GenerateContentConfig(
354
+ tools=[types.Tool(code_execution=types.ToolCodeExecution)],
355
+ thinking_config=types.ThinkingConfig(
356
+ thinking_level=THINKING_LEVEL
357
+ )
358
  ),
359
  )
360
 
 
387
  model=LLM_CODE_EXECUTION,
388
  contents=[file, question],
389
  config=types.GenerateContentConfig(
390
+ tools=[types.Tool(code_execution=types.ToolCodeExecution)],
391
+ thinking_config=types.ThinkingConfig(
392
+ thinking_level=THINKING_LEVEL
393
+ )
394
  ),
395
  )
396