Kackle committed on
Commit
0eaa154
·
verified ·
1 Parent(s): 05fd60f

Update gemini_agent.py

Browse files
Files changed (1) hide show
  1. gemini_agent.py +6 -40
gemini_agent.py CHANGED
@@ -6,8 +6,8 @@ import re
6
  import time
7
  import asyncio
8
  # Add LangChain tools for Wikipedia and DuckDuckGo
9
- from langchain.tools import DuckDuckGoSearchRun, WikipediaQueryRun
10
- from langchain.utilities import WikipediaAPIWrapper
11
 
12
  load_dotenv()
13
 
@@ -19,7 +19,7 @@ class GeminiAgent:
19
  api_key = os.getenv('GOOGLE_API_KEY')
20
  genai.configure(api_key=api_key)
21
 
22
- self.model = genai.GenerativeModel('gemini-2.0-flash-exp')
23
  self.last_request_time = 0
24
  self.min_request_interval = 6.0 # 6 seconds between requests (10 per minute limit)
25
 
@@ -162,7 +162,6 @@ Provide only the direct answer. If it's a quote, give just the quoted text. If i
162
 
163
  async def _handle_text_question(self, question: str) -> str:
164
  """Handle regular text-based questions"""
165
- prompt = ""
166
  # Only use retrieval for explicit web/Wikipedia questions
167
  def is_explicit_retrieval_question(question):
168
  q = question.lower()
@@ -186,43 +185,10 @@ Provide only the direct answer. If it's a quote, give just the quoted text. If i
186
  ddg_context = self.ddg_tool.run(question)
187
  except Exception as e:
188
  print(f"DuckDuckGo tool failed: {e}")
189
- # Handle attached file questions with enhanced prompts
190
- if 'attached' in question.lower():
191
- if 'python code' in question.lower():
192
- prompt = f"""This question refers to attached Python code. Based on typical code execution patterns, provide the most likely numeric output:\n\n{question}\n\nAnswer:"""
193
- elif '.mp3' in question.lower():
194
- prompt = f"""This question refers to an attached audio file. Provide the most likely answer based on the context:\n\n{question}\n\nAnswer:"""
195
- else:
196
- prompt = f"""This question refers to an attached file. Provide the most likely answer:\n\n{question}\n\nAnswer:"""
197
- # Handle chess position question
198
- elif 'chess position' in question.lower() and 'image' in question.lower():
199
- prompt = f"""This is a chess question with an attached image. Provide the best chess move in algebraic notation:\n\n{question}\n\nAnswer:"""
200
- # Handle list extraction and formatting
201
- elif (
202
- 'alphabetize' in question.lower() or
203
- 'comma separated' in question.lower() or
204
- 'list' in question.lower() or
205
- 'ingredients' in question.lower() or
206
- 'page numbers' in question.lower() or
207
- 'vegetables' in question.lower()
208
- ):
209
- # Add domain definition for botanical vegetables
210
- if 'vegetable' in question.lower() and ('botany' in question.lower() or 'botanical' in question.lower()):
211
- definition = ("In botany, a vegetable is any edible part of a plant that is not a fruit or seed. "
212
- "Fruits contain seeds and develop from the ovary of a flower. Use this definition.")
213
- prompt = f"{definition}\n\n{question}\n\nList only the requested items, alphabetized, comma separated, and do not include any explanations or extra words."
214
- else:
215
- prompt = f"{question}\n\nList only the requested items, alphabetized, comma separated, and do not include any explanations or extra words."
216
- # Create enhanced prompt based on question type
217
- elif 'how many' in question.lower() or 'what is the' in question.lower():
218
- prompt = f"""Provide only the exact answer to this question. No explanations, just the specific number, name, or fact requested:\n\n{question}\n\nAnswer:"""
219
- elif 'who' in question.lower():
220
- prompt = f"""Provide only the name requested. No explanations or additional context:\n\n{question}\n\nAnswer:"""
221
- elif 'where' in question.lower():
222
- prompt = f"""Provide only the location requested. No explanations:\n\n{question}\n\nAnswer:"""
223
- else:
224
- prompt = f"""Answer this question with only the essential information requested:\n\n{question}\n\nAnswer:"""
225
 
 
 
 
226
  # Prepend context to the prompt if available and likely relevant
227
  def is_good_context(context):
228
  return context and not any(x in context.lower() for x in ["not found", "no results", "does not contain information"])
 
6
  import time
7
  import asyncio
8
  # Add LangChain tools for Wikipedia and DuckDuckGo
9
+ from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
10
+ from langchain_community.utilities import WikipediaAPIWrapper
11
 
12
  load_dotenv()
13
 
 
19
  api_key = os.getenv('GOOGLE_API_KEY')
20
  genai.configure(api_key=api_key)
21
 
22
+ self.model = genai.GenerativeModel('gemini-1.5-pro-latest')
23
  self.last_request_time = 0
24
  self.min_request_interval = 6.0 # 6 seconds between requests (10 per minute limit)
25
 
 
162
 
163
  async def _handle_text_question(self, question: str) -> str:
164
  """Handle regular text-based questions"""
 
165
  # Only use retrieval for explicit web/Wikipedia questions
166
  def is_explicit_retrieval_question(question):
167
  q = question.lower()
 
185
  ddg_context = self.ddg_tool.run(question)
186
  except Exception as e:
187
  print(f"DuckDuckGo tool failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
 
189
+ # Simplified prompt construction
190
+ prompt = f"Answer the following question:\n\n{question}"
191
+
192
  # Prepend context to the prompt if available and likely relevant
193
  def is_good_context(context):
194
  return context and not any(x in context.lower() for x in ["not found", "no results", "does not contain information"])