Delete src
- src/__pycache__/agent.cpython-311.pyc +0 -0
- src/__pycache__/config.cpython-311.pyc +0 -0
- src/__pycache__/ui.cpython-311.pyc +0 -0
- src/__pycache__/utils.cpython-311.pyc +0 -0
- src/agent.py +0 -487
- src/config.py +0 -315
- src/gemini_client.py +0 -232
- src/ui.py +0 -184
- src/utils.py +0 -302
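For context, the modules deleted here were wired together roughly as sketched below. This is a minimal, hypothetical entry point (an app.py is assumed and is not part of this commit); only Config, GeminiAgent, and create_interface come from the deleted files, and launch() is the standard Gradio call.

# Hypothetical app.py — illustration only, not part of this commit
from src.config import Config
from src.agent import GeminiAgent
from src.ui import create_interface

if __name__ == "__main__":
    config = Config()                      # reads GEMINI_API_KEY / GEMINI_API_KEY_N from the environment
    agent = GeminiAgent(config)            # multi-key Gemini agent from src/agent.py
    interface = create_interface(agent)    # Gradio Blocks UI from src/ui.py
    interface.launch()                     # assumption: default Gradio launch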
src/__pycache__/agent.cpython-311.pyc
DELETED
Binary file (9.07 kB)

src/__pycache__/config.cpython-311.pyc
DELETED
Binary file (2.42 kB)

src/__pycache__/ui.cpython-311.pyc
DELETED
Binary file (9.35 kB)

src/__pycache__/utils.cpython-311.pyc
DELETED
Binary file (5.43 kB)
src/agent.py
DELETED
@@ -1,487 +0,0 @@
"""
Enhanced Gemini AI Agent implementation with improved accuracy strategies and multi-API key support.
"""

import logging
import asyncio
import re
from typing import List, Dict, Any, Optional
import google.generativeai as genai
from google.generativeai.types import HarmCategory, HarmBlockThreshold
from src.config import Config
from src.utils import sanitize_input, format_response

logger = logging.getLogger(__name__)


class GeminiClientWithFallback:
    """
    Enhanced Gemini client that automatically switches API keys when quota is exceeded.
    Integrated into the existing agent architecture.
    """

    def __init__(self, config: Config):
        """Initialize the client with configuration."""
        self.config = config
        self.current_model = None
        self._initialize_model()

    def _initialize_model(self):
        """Initialize the Gemini model with current API key."""
        try:
            current_key = self.config.get_current_api_key()
            genai.configure(api_key=current_key)

            # Enhanced safety settings
            safety_settings = {
                HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
                HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
                HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
                HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
            }

            self.current_model = genai.GenerativeModel(
                model_name=self.config.model_name,
                safety_settings=safety_settings
            )

            current_key_info = self.config.api_keys[self.config.current_key_index]
            logger.info(f"Initialized model with API key: {current_key_info.label}")

        except RuntimeError as e:
            logger.error(f"Failed to initialize model: {e}")
            self.current_model = None

    def _switch_api_key(self, error_message: str = "") -> bool:
        """Switch to next available API key."""
        if self.config.handle_quota_error(error_message):
            self._initialize_model()
            return self.current_model is not None
        return False

    def generate_content(self, prompt: str, generation_config: Optional[Dict] = None, max_retries: Optional[int] = None) -> Optional[str]:
        """Generate content with automatic API key fallback."""
        if max_retries is None:
            max_retries = self.config.retry_attempts

        last_error = None

        for attempt in range(max_retries + 1):
            if not self.current_model:
                logger.warning("No model available. Trying to initialize...")
                self._initialize_model()
                if not self.current_model:
                    continue

            try:
                current_key_info = self.config.api_keys[self.config.current_key_index]
                logger.debug(f"Attempt {attempt + 1}/{max_retries + 1} with {current_key_info.label}")

                # Make the request with custom generation config if provided
                if generation_config:
                    config_obj = genai.types.GenerationConfig(**generation_config)
                    response = self.current_model.generate_content(prompt, generation_config=config_obj)
                else:
                    response = self.current_model.generate_content(prompt)

                # Record successful request
                self.config.record_successful_request()

                # Return the generated text
                if response and response.text:
                    return response.text.strip()
                else:
                    logger.warning("Empty response received")
                    return None

            except Exception as e:
                error_message = str(e)
                last_error = error_message
                logger.warning(f"Request failed: {error_message}")

                # Check if it's a quota error
                if "429" in error_message or "quota" in error_message.lower():
                    logger.info("Quota exceeded detected. Attempting to switch API key...")

                    if not self._switch_api_key(error_message):
                        logger.error("No more API keys available")
                        break

                    # Continue to next attempt with new key
                    continue

                # For other errors, wait a bit before retrying
                elif attempt < max_retries:
                    import time
                    wait_time = min(2 ** attempt, 10)  # Exponential backoff, max 10 seconds
                    logger.info(f"Waiting {wait_time} seconds before retry...")
                    time.sleep(wait_time)
                else:
                    break

        logger.error(f"All attempts failed. Last error: {last_error}")
        return None

    def get_status(self) -> Dict[str, Any]:
        """Get current status of the client and API keys."""
        status = self.config.get_keys_status()
        status.update({
            "model_initialized": self.current_model is not None,
            "current_model": self.config.model_name
        })
        return status


class GeminiAgent:
    """
    Enhanced AI Agent powered by Google's Gemini model with advanced reasoning capabilities and multi-API key support.
    """

    def __init__(self, config: Config):
        """Initialize the Enhanced Gemini Agent with multi-key support."""
        self.config = config
        self.conversation_history: List[Dict[str, str]] = []
        self._initialize_client()

    def _initialize_client(self):
        """Initialize the Gemini API client with multi-key support and optimized settings."""
        try:
            # Initialize the enhanced client with fallback support
            self.client = GeminiClientWithFallback(self.config)

            logger.info(f"Enhanced Gemini client initialized with model: {self.config.model_name}")

            # Log API key status
            status = self.get_status()
            logger.info(f"Available API keys: {status['total_keys']}")
            current_key_info = status['keys'][status['current_key']]
            logger.info(f"Starting with: {current_key_info['label']}")

        except Exception as e:
            logger.error(f"Failed to initialize Gemini client: {e}")
            raise

    def _build_enhanced_system_prompt(self) -> str:
        """Build an enhanced system prompt for maximum accuracy."""
        return """You are an elite AI research assistant with exceptional analytical capabilities. Your mission is to provide the most accurate, precise, and well-reasoned answers possible.

CRITICAL SUCCESS STRATEGIES:

1. **DEEP ANALYSIS PROTOCOL**:
- Read questions multiple times to fully understand what's being asked
- Identify the exact type of information needed (factual, numerical, analytical, etc.)
- Break complex questions into sub-components
- Consider multiple interpretations and choose the most logical one

2. **RESEARCH EXCELLENCE**:
- For factual questions: Provide specific, verifiable information
- For numerical questions: Show clear calculations and reasoning
- For analytical questions: Provide structured, logical analysis
- For historical/biographical questions: Include specific dates, names, and context

3. **ACCURACY MAXIMIZATION**:
- If you're not completely certain, state your confidence level
- Distinguish between facts you know and reasonable inferences
- For mathematical problems, double-check your calculations
- For factual claims, consider if they align with well-established knowledge

4. **QUESTION TYPE OPTIMIZATION**:
- Factual queries: Provide direct, specific answers with key details
- Quantitative queries: Show step-by-step calculations
- Comparative queries: Create clear comparisons with specific criteria
- Creative queries: Balance creativity with logical structure

5. **RESPONSE STRUCTURE**:
- Start with a direct answer to the main question
- Provide supporting evidence or reasoning
- Include relevant context when helpful
- End with a clear, concise summary if the answer is complex

SPECIAL FOCUS AREAS:
- Pay extra attention to numbers, dates, and specific details
- For "how many" questions, provide exact counts when possible
- For "when" questions, provide specific timeframes
- For "who" questions, provide full names and relevant context
- For "what" questions, provide comprehensive but focused explanations

Remember: Accuracy is your top priority. A well-reasoned, precise answer is infinitely better than a vague or uncertain response."""

    def _analyze_question_type(self, question: str) -> str:
        """Analyze the question type to apply appropriate strategies."""
        question = question.lower()

        if any(word in question for word in ['how many', 'count', 'number of']):
            return 'quantitative'
        elif any(word in question for word in ['when', 'date', 'year', 'time']):
            return 'temporal'
        elif any(word in question for word in ['who', 'which person', 'author', 'creator']):
            return 'biographical'
        elif any(word in question for word in ['what is', 'define', 'explain']):
            return 'definitional'
        elif any(word in question for word in ['compare', 'difference', 'versus', 'vs']):
            return 'comparative'
        elif any(word in question for word in ['calculate', 'compute', 'solve']):
            return 'mathematical'
        else:
            return 'general'

    def _build_enhanced_prompt(self, question: str, question_type: str) -> str:
        """Build question-specific prompts for maximum accuracy."""
        base_prompt = self._build_enhanced_system_prompt()

        type_specific_instructions = {
            'quantitative': """
QUANTITATIVE QUESTION DETECTED:
- Provide exact numbers whenever possible
- Show your counting/calculation process
- Double-check your arithmetic
- If estimating, clearly state it's an estimate
- Be precise about the scope of your count
""",
            'temporal': """
TEMPORAL QUESTION DETECTED:
- Provide specific dates, years, or time periods
- Use exact timeframes when known
- If uncertain about exact dates, provide the closest known timeframe
- Consider historical context and sequence of events
""",
            'biographical': """
BIOGRAPHICAL QUESTION DETECTED:
- Provide full names when available
- Include relevant titles, positions, or roles
- Add brief context about why this person is notable
- Verify the person matches the context of the question
""",
            'definitional': """
DEFINITIONAL QUESTION DETECTED:
- Provide clear, accurate definitions
- Include key characteristics or components
- Add relevant examples if helpful
- Ensure completeness without unnecessary complexity
""",
            'comparative': """
COMPARATIVE QUESTION DETECTED:
- Identify specific criteria for comparison
- Provide structured analysis
- Highlight key similarities and differences
- Use concrete examples where possible
""",
            'mathematical': """
MATHEMATICAL QUESTION DETECTED:
- Show all calculation steps clearly
- Double-check your arithmetic
- State any assumptions made
- Present the final answer clearly
"""
        }

        specific_instruction = type_specific_instructions.get(question_type, "")

        return f"{base_prompt}\n\n{specific_instruction}\nQUESTION: {question}\n\nProvide your most accurate, well-reasoned response:"

    async def process_question(self, question: str, context: Optional[str] = None) -> str:
        """Process a question with enhanced accuracy strategies and multi-key fallback."""
        try:
            # Sanitize and analyze the question
            clean_question = sanitize_input(question)
            question_type = self._analyze_question_type(clean_question)

            # Build the enhanced prompt
            enhanced_prompt = self._build_enhanced_prompt(clean_question, question_type)

            # Add context if provided
            if context:
                enhanced_prompt += f"\n\nAdditional Context: {context}"

            # Generate response with optimized parameters using multi-key client
            generation_config = {
                'max_output_tokens': self.config.max_tokens,
                'temperature': self.config.temperature,
                'top_p': self.config.top_p,
                'top_k': self.config.top_k,
            }

            response_text = self.client.generate_content(
                enhanced_prompt,
                generation_config=generation_config
            )

            if response_text:
                # Post-process the response for accuracy
                processed_response = self._post_process_response(response_text, question_type)
                formatted_response = format_response(processed_response)

                # Update conversation history
                self._update_history(clean_question, formatted_response)

                logger.info(f"Successfully processed {question_type} question: {clean_question[:50]}...")
                return formatted_response
            else:
                # Check if all API keys are exhausted
                status = self.get_status()
                available_keys = sum(1 for key in status['keys'] if key['can_make_request'])

                if available_keys == 0:
                    error_msg = "I apologize, but all API keys have exceeded their quotas. Please try again later or add more API keys."
                else:
                    error_msg = "I apologize, but I couldn't generate a response. Please try rephrasing your question."

                logger.warning(f"Failed to generate response. Available keys: {available_keys}")
                return error_msg

        except Exception as e:
            logger.error(f"Error processing question: {e}")
            return f"I encountered an error while processing your question: {str(e)}"

    def _post_process_response(self, response: str, question_type: str) -> str:
        """Post-process responses based on question type for maximum accuracy."""

        if question_type == 'quantitative':
            # Ensure numerical answers are clearly stated
            numbers = re.findall(r'\b\d+\b', response)
            if numbers and not response.strip().startswith(tuple(numbers)):
                # If the response doesn't start with a number, try to restructure
                pass  # Keep original for now, but could enhance further

        elif question_type == 'temporal':
            # Ensure dates are properly formatted
            response = re.sub(r'\b(\d{4})\b', r'\1', response)  # Ensure years are clear

        # General cleanup
        response = response.strip()

        # Remove any uncertain language that might reduce confidence
        uncertain_phrases = [
            "I think", "I believe", "probably", "might be", "could be",
            "it seems", "appears to be", "likely"
        ]

        # Only remove uncertainty if we have factual content
        if any(indicator in response.lower() for indicator in ['according to', 'research shows', 'studies indicate']):
            for phrase in uncertain_phrases:
                response = re.sub(f'\\b{phrase}\\b', '', response, flags=re.IGNORECASE)
            response = re.sub(r'\s+', ' ', response)  # Clean up extra spaces

        return response

    def _update_history(self, question: str, answer: str):
        """Update conversation history with enhanced tracking."""
        self.conversation_history.append({
            'question': question,
            'answer': answer,
            'question_type': self._analyze_question_type(question)
        })

        # Keep recent history for context
        if len(self.conversation_history) > self.config.max_history_length:
            self.conversation_history = self.conversation_history[-self.config.max_history_length:]

    def clear_history(self):
        """Clear the conversation history."""
        self.conversation_history.clear()
        logger.info("Conversation history cleared")

    def get_stats(self) -> Dict[str, Any]:
        """Get enhanced agent statistics including API key information."""
        question_types = {}
        for entry in self.conversation_history:
            q_type = entry.get('question_type', 'unknown')
            question_types[q_type] = question_types.get(q_type, 0) + 1

        # Get API key status
        api_status = self.get_status()

        return {
            'model': self.config.model_name,
            'conversation_length': len(self.conversation_history),
            'max_tokens': self.config.max_tokens,
            'temperature': self.config.temperature,
            'question_types_handled': question_types,
            'total_api_keys': api_status['total_keys'],
            'current_api_key': api_status['keys'][api_status['current_key']]['label'],
            'available_keys': sum(1 for key in api_status['keys'] if key['can_make_request'])
        }

    def get_status(self) -> Dict[str, Any]:
        """Get current status of the agent and API keys."""
        return self.client.get_status()

    def get_detailed_status(self) -> str:
        """Get a formatted status report."""
        status = self.get_status()
        stats = self.get_stats()

        report = []
        report.append("=== ENHANCED GEMINI AGENT STATUS ===")
        report.append(f"Model: {status['current_model']}")
        report.append(f"Model Initialized: {status['model_initialized']}")
        report.append(f"Total API Keys: {status['total_keys']}")
        report.append(f"Available Keys: {stats['available_keys']}")
        report.append(f"Conversation Length: {stats['conversation_length']}")
        report.append(f"Temperature: {stats['temperature']}")
        report.append(f"Max Tokens: {stats['max_tokens']}")
        report.append("")
        report.append("Question Types Handled:")
        for q_type, count in stats['question_types_handled'].items():
            report.append(f"  {q_type}: {count}")
        report.append("")
        report.append("API Keys Status:")

        for key_info in status['keys']:
            active_indicator = "✓" if key_info['is_active'] else "✗"
            current_indicator = " (CURRENT)" if key_info['is_current'] else ""
            can_request = "✓" if key_info['can_make_request'] else "✗"

            report.append(
                f"  {active_indicator} {key_info['label']}: "
                f"Daily {key_info['daily_usage']}, "
                f"Minute {key_info['minute_usage']}, "
                f"Can Request: {can_request}"
                f"{current_indicator}"
            )

        return "\n".join(report)

    def reset_all_keys(self):
        """Reset all API keys (for testing purposes)."""
        self.client.config.reset_all_keys()
        logger.info("All API keys reset")

    def switch_to_next_key(self) -> bool:
        """Manually switch to next available API key."""
        try:
            current_status = self.get_status()
            current_key_label = current_status['keys'][current_status['current_key']]['label']

            # Force switch by simulating quota error
            success = self.client._switch_api_key("Manual switch requested")

            if success:
                new_status = self.get_status()
                new_key_label = new_status['keys'][new_status['current_key']]['label']
                logger.info(f"Switched from {current_key_label} to {new_key_label}")
                return True
            else:
                logger.warning("No alternative API keys available")
                return False

        except Exception as e:
            logger.error(f"Error switching API key: {e}")
            return False

    # Compatibility method for Gradio interface
    def chat(self, message: str, history: Optional[List[List[str]]] = None) -> str:
        """
        Chat method for Gradio interface compatibility.
        Processes the message asynchronously and returns the response.
        """
        try:
            # Run the async process_question method
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            response = loop.run_until_complete(self.process_question(message))
            loop.close()
            return response
        except Exception as e:
            logger.error(f"Error in chat method: {e}")
            return f"I encountered an error: {str(e)}"
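The public entry points of the file above are the async process_question method and the synchronous chat wrapper used by the Gradio UI. A minimal usage sketch, assuming a Config with at least one valid key in the environment (this snippet is an illustration and not part of the commit):

# Illustration only — not part of the deleted file
import asyncio
from src.config import Config
from src.agent import GeminiAgent

agent = GeminiAgent(Config())
# Async path, as the Gradio handlers call it
answer = asyncio.run(agent.process_question("When was the first transistor demonstrated?"))
# Sync convenience wrapper
answer = agent.chat("How many studio albums did Mercedes Sosa release between 2000 and 2009?")
print(answer)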
src/config.py
DELETED
@@ -1,315 +0,0 @@
"""
Enhanced configuration with multiple API keys and automatic fallback.
"""

import os
import time
from typing import List, Dict, Any, Optional
from dataclasses import dataclass


@dataclass
class APIKeyInfo:
    """Information about an API key."""
    key: str
    label: str
    is_active: bool = True
    requests_used: int = 0
    requests_this_minute: int = 0
    last_request_time: float = 0
    daily_limit: int = 1500  # Gemini free tier daily limit
    minute_limit: int = 15  # Gemini free tier per-minute limit


class Config:
    """
    Enhanced configuration class with multiple API keys and automatic fallback.
    """

    def __init__(self):
        """Initialize configuration with multiple API keys."""
        # Model configuration
        self.model_name = "gemini-2.0-flash"
        self.temperature = 0.7
        self.max_output_tokens = 2048
        self.max_tokens = 4096  # Backward compatibility
        self.timeout_seconds = 30
        self.retry_attempts = 3

        # Additional backward compatibility attributes
        self.top_p = 0.95
        self.top_k = 40
        self.max_history_length = 10  # Maximum conversation history to maintain
        self.safety_threshold = "BLOCK_MEDIUM_AND_ABOVE"  # Safety settings
        self.candidate_count = 1  # Number of response candidates

        # Initialize API keys
        self.api_keys: List[APIKeyInfo] = []
        self.current_key_index = 0
        self._load_api_keys()

        if not self.api_keys:
            raise ValueError("No API keys found. Please set GEMINI_API_KEY or GEMINI_API_KEY_1, GEMINI_API_KEY_2, etc.")

    def _load_api_keys(self):
        """Load API keys from environment variables."""
        # Try single key first
        single_key = os.getenv("GEMINI_API_KEY")
        if single_key:
            self.api_keys.append(APIKeyInfo(
                key=single_key,
                label="Primary Key"
            ))

        # Try numbered keys
        key_index = 1
        while True:
            key = os.getenv(f"GEMINI_API_KEY_{key_index}")
            if not key:
                break
            self.api_keys.append(APIKeyInfo(
                key=key,
                label=f"Key #{key_index}"
            ))
            key_index += 1

        # If we found numbered keys and a single key, rename the single key
        if len(self.api_keys) > 1 and single_key:
            self.api_keys[0].label = "Key #0 (Primary)"

        print(f"Loaded {len(self.api_keys)} API key(s)")

    def get_current_api_key(self) -> str:
        """Get the current active API key."""
        if not self.api_keys or self.current_key_index >= len(self.api_keys):
            raise RuntimeError("No active API key available")

        current_key_info = self.api_keys[self.current_key_index]
        if not current_key_info.is_active:
            raise RuntimeError("Current API key is not active")

        return current_key_info.key

    def get_generation_config(self) -> Dict[str, Any]:
        """Get the generation configuration for Gemini."""
        return {
            "temperature": self.temperature,
            "max_output_tokens": self.max_output_tokens,
            "top_p": getattr(self, 'top_p', 0.95),
            "top_k": getattr(self, 'top_k', 40),
            "candidate_count": getattr(self, 'candidate_count', 1),
        }

    def get_safety_settings(self) -> List[Dict[str, str]]:
        """Get safety settings for Gemini."""
        from google.generativeai.types import HarmCategory, HarmBlockThreshold

        # Return basic safety settings
        return [
            {
                "category": "HARM_CATEGORY_HARASSMENT",
                "threshold": getattr(self, 'safety_threshold', "BLOCK_MEDIUM_AND_ABOVE")
            },
            {
                "category": "HARM_CATEGORY_HATE_SPEECH",
                "threshold": getattr(self, 'safety_threshold', "BLOCK_MEDIUM_AND_ABOVE")
            },
            {
                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
                "threshold": getattr(self, 'safety_threshold', "BLOCK_MEDIUM_AND_ABOVE")
            },
            {
                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
                "threshold": getattr(self, 'safety_threshold', "BLOCK_MEDIUM_AND_ABOVE")
            }
        ]

    def handle_quota_error(self, error_message: str = "") -> bool:
        """
        Handle quota exceeded error by switching to next available API key.

        Args:
            error_message: Error message from the failed request

        Returns:
            bool: True if successfully switched to new key, False if no keys available
        """
        # Mark current key as exhausted
        if self.current_key_index < len(self.api_keys):
            current_key = self.api_keys[self.current_key_index]
            current_key.is_active = False
            print(f"Marking {current_key.label} as exhausted")

        # Find next active key
        original_index = self.current_key_index
        attempts = 0

        while attempts < len(self.api_keys):
            self.current_key_index = (self.current_key_index + 1) % len(self.api_keys)
            attempts += 1

            if self.api_keys[self.current_key_index].is_active:
                print(f"Switched to {self.api_keys[self.current_key_index].label}")
                return True

            if self.current_key_index == original_index:
                break

        # No active keys found
        print("No active API keys remaining")
        return False

    def record_successful_request(self):
        """Record a successful request for rate limiting."""
        if self.current_key_index < len(self.api_keys):
            key_info = self.api_keys[self.current_key_index]
            current_time = time.time()

            # Reset minute counter if more than a minute has passed
            if current_time - key_info.last_request_time > 60:
                key_info.requests_this_minute = 0

            key_info.requests_used += 1
            key_info.requests_this_minute += 1
            key_info.last_request_time = current_time

            # Check if we should preemptively mark key as exhausted
            if (key_info.requests_used >= key_info.daily_limit or
                    key_info.requests_this_minute >= key_info.minute_limit):
                print(f"Rate limit approaching for {key_info.label}, marking as exhausted")
                key_info.is_active = False

    def get_keys_status(self) -> Dict[str, Any]:
        """
        Get status of all API keys.

        Returns:
            Dictionary with detailed status information
        """
        keys_status = []

        for i, key_info in enumerate(self.api_keys):
            keys_status.append({
                "label": key_info.label,
                "is_active": key_info.is_active,
                "is_current": i == self.current_key_index,
                "daily_usage": key_info.requests_used,
                "minute_usage": key_info.requests_this_minute,
                "daily_limit": key_info.daily_limit,
                "minute_limit": key_info.minute_limit
            })

        return {
            "total_keys": len(self.api_keys),
            "active_keys": sum(1 for key in self.api_keys if key.is_active),
            "current_key": self.current_key_index,
            "keys": keys_status
        }

    def reset_all_keys(self):
        """Reset all API keys to active state (for testing purposes)."""
        for key_info in self.api_keys:
            key_info.is_active = True
            key_info.requests_used = 0
            key_info.requests_this_minute = 0
            key_info.last_request_time = 0
        print("All API keys reset to active state")

    def add_api_key(self, api_key: str, label: str = None):
        """
        Add a new API key to the configuration.

        Args:
            api_key: The API key string
            label: Optional label for the key
        """
        if label is None:
            label = f"Key #{len(self.api_keys) + 1}"

        self.api_keys.append(APIKeyInfo(key=api_key, label=label))
        print(f"Added new API key: {label}")

    def remove_api_key(self, index: int):
        """
        Remove an API key by index.

        Args:
            index: Index of the key to remove
        """
        if 0 <= index < len(self.api_keys):
            removed_key = self.api_keys.pop(index)
            print(f"Removed API key: {removed_key.label}")

            # Adjust current index if necessary
            if self.current_key_index >= len(self.api_keys):
                self.current_key_index = 0
        else:
            print(f"Invalid key index: {index}")

    def __getattr__(self, name):
        """
        Handle missing attributes gracefully with sensible defaults.
        This ensures backward compatibility with existing code.
        """
        defaults = {
            'max_history_length': 10,
            'safety_threshold': 'BLOCK_MEDIUM_AND_ABOVE',
            'candidate_count': 1,
            'stop_sequences': [],
            'presence_penalty': 0.0,
            'frequency_penalty': 0.0,
            'response_mime_type': 'text/plain',
            'system_instruction': '',
            'tools': [],
            'tool_config': None
        }

        if name in defaults:
            # Set the attribute so it's available for future access
            setattr(self, name, defaults[name])
            return defaults[name]

        # If no default is available, raise the normal AttributeError
        raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")


# Example of how to set up multiple API keys in your environment:
def setup_example():
    """
    Example showing how to set up multiple API keys.
    This would typically be done in your shell/environment, not in code.
    """
    example_setup = '''
# In your shell or .env file:
export GEMINI_API_KEY="your-primary-key-here"
export GEMINI_API_KEY_1="your-first-backup-key"
export GEMINI_API_KEY_2="your-second-backup-key"
export GEMINI_API_KEY_3="your-third-backup-key"
'''
    print("To use multiple API keys, set up your environment like this:")
    print(example_setup)


if __name__ == "__main__":
    try:
        config = Config()
        status = config.get_keys_status()

        print("=== Configuration Status ===")
        print(f"Model: {config.model_name}")
        print(f"Total API keys: {status['total_keys']}")
        print(f"Active keys: {status['active_keys']}")
        print()

        print("API Keys:")
        for key_status in status["keys"]:
            active_indicator = "✓" if key_status["is_active"] else "✗"
            current_indicator = " (CURRENT)" if key_status["is_current"] else ""
            print(f"  {active_indicator} {key_status['label']}: "
                  f"{key_status['daily_usage']}/{key_status['daily_limit']} daily, "
                  f"{key_status['minute_usage']}/{key_status['minute_limit']} per minute"
                  f"{current_indicator}")

    except Exception as e:
        print(f"Error: {e}")
        setup_example()
src/gemini_client.py
DELETED
@@ -1,232 +0,0 @@
"""
Enhanced Gemini client with automatic API key fallback.
"""

import google.generativeai as genai
import time
from typing import Optional, Dict, Any
from config import Config  # Import the enhanced config


class GeminiClientWithFallback:
    """
    Enhanced Gemini client that automatically switches API keys when quota is exceeded.
    """

    def __init__(self, config: Config):
        """
        Initialize the client with configuration.

        Args:
            config: Enhanced configuration with multiple API keys
        """
        self.config = config
        self.current_model = None
        self._initialize_model()

    def _initialize_model(self):
        """Initialize the Gemini model with current API key."""
        try:
            current_key = self.config.get_current_api_key()
            genai.configure(api_key=current_key)
            self.current_model = genai.GenerativeModel(
                model_name=self.config.model_name,
                generation_config=self.config.get_generation_config()
            )
            print(f"Initialized model with API key: {self.config.api_keys[self.config.current_key_index].label}")
        except RuntimeError as e:
            print(f"Failed to initialize model: {e}")
            self.current_model = None

    def _switch_api_key(self, error_message: str = "") -> bool:
        """
        Switch to next available API key.

        Args:
            error_message: Error message from failed request

        Returns:
            bool: True if successfully switched, False if no keys available
        """
        if self.config.handle_quota_error(error_message):
            self._initialize_model()
            return self.current_model is not None
        return False

    def generate_content(self, prompt: str, max_retries: Optional[int] = None) -> Optional[str]:
        """
        Generate content with automatic API key fallback.

        Args:
            prompt: The prompt to send to Gemini
            max_retries: Maximum number of retries (uses config default if None)

        Returns:
            Generated content or None if all attempts failed
        """
        if max_retries is None:
            max_retries = self.config.retry_attempts

        last_error = None

        for attempt in range(max_retries + 1):
            if not self.current_model:
                print("No model available. Trying to initialize...")
                self._initialize_model()
                if not self.current_model:
                    continue

            try:
                print(f"Attempt {attempt + 1}/{max_retries + 1} with {self.config.api_keys[self.config.current_key_index].label}")

                # Make the request
                response = self.current_model.generate_content(
                    prompt,
                    request_options={"timeout": self.config.timeout_seconds}
                )

                # Record successful request
                self.config.record_successful_request()

                # Return the generated text
                if response and response.text:
                    return response.text.strip()
                else:
                    print("Empty response received")
                    return None

            except Exception as e:
                error_message = str(e)
                last_error = error_message
                print(f"Request failed: {error_message}")

                # Check if it's a quota error
                if "429" in error_message or "quota" in error_message.lower():
                    print("Quota exceeded detected. Attempting to switch API key...")

                    if not self._switch_api_key(error_message):
                        print("No more API keys available")
                        break

                    # Continue to next attempt with new key
                    continue

                # For other errors, wait a bit before retrying
                elif attempt < max_retries:
                    wait_time = min(2 ** attempt, 10)  # Exponential backoff, max 10 seconds
                    print(f"Waiting {wait_time} seconds before retry...")
                    time.sleep(wait_time)

        print(f"All attempts failed. Last error: {last_error}")
        return None

    def get_status(self) -> Dict[str, Any]:
        """
        Get current status of the client and API keys.

        Returns:
            Dictionary with status information
        """
        status = self.config.get_keys_status()
        status.update({
            "model_initialized": self.current_model is not None,
            "current_model": self.config.model_name
        })
        return status

    def reset_all_keys(self):
        """Reset all API keys to active state (for testing purposes)."""
        for key_info in self.config.api_keys:
            key_info.is_active = True
            key_info.requests_used = 0
            key_info.requests_this_minute = 0
        print("All API keys reset to active state")


# Example usage and testing
def example_usage():
    """
    Example of how to use the enhanced client with fallback.
    """
    try:
        # Initialize configuration
        config = Config()

        # Create client
        client = GeminiClientWithFallback(config)

        # Print initial status
        print("=== Initial Status ===")
        status = client.get_status()
        print(f"Model initialized: {status['model_initialized']}")
        print(f"Total API keys: {status['total_keys']}")
        print(f"Current key: {status['keys'][status['current_key']]['label']}")

        # Test request
        print("\n=== Testing Request ===")
        prompt = "Explain quantum computing in simple terms."
        response = client.generate_content(prompt)

        if response:
            print("Success! Response received:")
            print(response[:200] + "..." if len(response) > 200 else response)
        else:
            print("Failed to get response")

        # Print final status
        print("\n=== Final Status ===")
        final_status = client.get_status()
        for key_status in final_status["keys"]:
            active_indicator = "✓" if key_status["is_active"] else "✗"
            current_indicator = " (CURRENT)" if key_status["is_current"] else ""
            print(f"  {active_indicator} {key_status['label']}: {key_status['daily_usage']} daily, {key_status['minute_usage']} per minute{current_indicator}")

    except Exception as e:
        print(f"Error in example: {e}")


def simulate_quota_exhaustion():
    """
    Simulate quota exhaustion to test key switching.
    """
    try:
        config = Config()
        client = GeminiClientWithFallback(config)

        print("=== Simulating Heavy Usage ===")

        # Simulate many requests to trigger quota limits
        for i in range(15):  # More than the free tier limit per minute
            print(f"\nRequest {i+1}:")
            response = client.generate_content(f"Count to {i+1}")

            if response:
                print(f"Success: {response[:50]}...")
            else:
                print("Failed")

            # Show current status
            status = client.get_status()
            current_key = status['keys'][status['current_key']]
            print(f"Current key: {current_key['label']} ({current_key['minute_usage']} requests this minute)")

            time.sleep(1)  # Small delay between requests

    except Exception as e:
        print(f"Error in simulation: {e}")


if __name__ == "__main__":
    print("Choose test mode:")
    print("1. Normal usage example")
    print("2. Simulate quota exhaustion")

    choice = input("Enter choice (1 or 2): ").strip()

    if choice == "1":
        example_usage()
    elif choice == "2":
        simulate_quota_exhaustion()
    else:
        print("Invalid choice")
        example_usage()
src/ui.py
DELETED
|
@@ -1,184 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Gradio user interface for the Gemini AI Agent.
|
| 3 |
-
"""
|
| 4 |
-
|
| 5 |
-
import gradio as gr
|
| 6 |
-
import asyncio
|
| 7 |
-
import logging
|
| 8 |
-
from typing import List, Tuple, Optional
|
| 9 |
-
from src.agent import GeminiAgent
|
| 10 |
-
from src.utils import validate_question, format_error_message
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
logger = logging.getLogger(__name__)
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
def create_interface(agent: GeminiAgent) -> gr.Blocks:
|
| 17 |
-
"""
|
| 18 |
-
Create the Gradio interface for the AI agent.
|
| 19 |
-
|
| 20 |
-
Args:
|
| 21 |
-
agent: The GeminiAgent instance
|
| 22 |
-
|
| 23 |
-
Returns:
|
| 24 |
-
gr.Blocks: The Gradio interface
|
| 25 |
-
"""
|
| 26 |
-
|
| 27 |
-
async def process_question_async(question: str, context: str = "") -> str:
|
| 28 |
-
"""
|
| 29 |
-
Async wrapper for question processing.
|
| 30 |
-
"""
|
| 31 |
-
return await agent.process_question(question, context or None)
|
| 32 |
-
|
| 33 |
-
def process_question_sync(question: str, context: str = "") -> str:
|
| 34 |
-
"""
|
| 35 |
-
Synchronous wrapper for question processing (required by Gradio).
|
| 36 |
-
"""
|
| 37 |
-
try:
|
| 38 |
-
# Validate question
|
| 39 |
-
is_valid, error_msg = validate_question(question)
|
| 40 |
-
if not is_valid:
|
| 41 |
-
return f"❌ **Error**: {error_msg}"
|
| 42 |
-
|
| 43 |
-
# Process question
|
| 44 |
-
loop = asyncio.new_event_loop()
|
| 45 |
-
asyncio.set_event_loop(loop)
|
| 46 |
-
try:
|
| 47 |
-
result = loop.run_until_complete(process_question_async(question, context))
|
| 48 |
-
return f"🤖 **AI Response**:\n\n{result}"
|
| 49 |
-
finally:
|
| 50 |
-
loop.close()
|
| 51 |
-
|
| 52 |
-
except Exception as e:
|
| 53 |
-
error_msg = format_error_message(e)
|
| 54 |
-
return f"❌ **Error**: {error_msg}"
|
| 55 |
-
|
| 56 |
-
def clear_conversation():
|
| 57 |
-
"""
|
| 58 |
-
Clear the conversation history.
|
| 59 |
-
"""
|
| 60 |
-
agent.clear_history()
|
| 61 |
-
return "", "Conversation history cleared! ✨"
|
| 62 |
-
|
| 63 |
-
def get_agent_info() -> str:
|
| 64 |
-
"""
|
| 65 |
-
Get information about the agent.
|
| 66 |
-
"""
|
| 67 |
-
stats = agent.get_stats()
|
| 68 |
-
return f"""
|
| 69 |
-
## 🤖 Agent Information
|
| 70 |
-
|
| 71 |
-
**Model**: {stats['model']}
|
| 72 |
-
**Conversation Length**: {stats['conversation_length']} exchanges
|
| 73 |
-
**Max Tokens**: {stats['max_tokens']}
|
| 74 |
-
**Temperature**: {stats['temperature']}
|
| 75 |
-
|
| 76 |
-
### 🎯 Specialized Capabilities
|
| 77 |
-
    - **Complex Research**: Multi-source fact-checking and analysis
    - **Mathematical Reasoning**: Step-by-step problem solving
    - **Multi-modal Analysis**: Processing images, videos, and audio
    - **Data Interpretation**: Tables, charts, and statistical analysis
    - **Creative Problem Solving**: Innovative approaches to unusual questions
    """

    # Create the interface
    with gr.Blocks(
        title="Gemini AI Research Agent",
        theme=gr.themes.Soft(),
        css="""
        .container {
            max-width: 1200px;
            margin: auto;
        }
        .question-box {
            border-left: 4px solid #4CAF50;
            padding-left: 16px;
        }
        .response-box {
            background-color: #f8f9fa;
            border-radius: 8px;
            padding: 16px;
        }
        """
    ) as interface:

        gr.Markdown("""
        # 🧠 Gemini AI Research Agent

        An advanced AI assistant powered by Google's Gemini 1.5 Flash, specialized in handling complex research questions, data analysis, and multi-modal content processing.

        **Perfect for**: Academic research, fact-checking, mathematical problems, data analysis, and challenging multi-step questions.
        """)

        with gr.Row():
            with gr.Column(scale=2):
                with gr.Group():
                    gr.Markdown("## 💬 Ask Your Question")

                    question_input = gr.Textbox(
                        label="Question",
                        placeholder="Enter your research question here... (e.g., 'How many studio albums were published by Mercedes Sosa between 2000 and 2009?')",
                        lines=4,
                        elem_classes=["question-box"]
                    )

                    context_input = gr.Textbox(
                        label="Additional Context (Optional)",
                        placeholder="Provide any additional context, constraints, or specific requirements...",
                        lines=2
                    )

                    with gr.Row():
                        submit_btn = gr.Button("🔍 Ask Question", variant="primary", size="lg")
                        clear_btn = gr.Button("🗑️ Clear History", variant="secondary")

                with gr.Group():
                    gr.Markdown("## 📝 Response")
                    response_output = gr.Textbox(
                        label="AI Response",
                        lines=15,
                        interactive=False,
                        elem_classes=["response-box"]
                    )

            with gr.Column(scale=1):
                with gr.Group():
                    gr.Markdown("## ℹ️ Agent Status")
                    agent_info = gr.Markdown(get_agent_info())
                    refresh_info_btn = gr.Button("🔄 Refresh Info", size="sm")

                gr.Markdown("""
                ---
                ### 🔧 Tips for Best Results:
                - **Be Specific**: Include all relevant details and constraints
                - **Multi-step Questions**: Break complex questions into clear parts
                - **Context Matters**: Use the context field for additional information
                - **Iterative Approach**: Build on previous questions for deeper analysis
                """)

        # Event handlers
        submit_btn.click(
            fn=process_question_sync,
            inputs=[question_input, context_input],
            outputs=[response_output]
        )

        clear_btn.click(
            fn=clear_conversation,
            outputs=[question_input, response_output]
        )

        refresh_info_btn.click(
            fn=get_agent_info,
            outputs=[agent_info]
        )

        # Allow Enter key to submit
        question_input.submit(
            fn=process_question_sync,
            inputs=[question_input, context_input],
            outputs=[response_output]
        )

    return interface
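Note: the deleted builder only constructs the Blocks app and returns it. A minimal sketch of how it was typically wired into an entry point is shown below; the module path src.ui, the builder name create_interface, and the app.py wiring are assumptions for illustration and do not appear in this diff.

    # app.py (hypothetical entry point, sketch only)
    from src.ui import create_interface  # assumed name of the builder that returns `interface`

    if __name__ == "__main__":
        demo = create_interface()   # builds the gr.Blocks app shown above
        demo.queue()                # optional: serialize long-running Gemini calls
        demo.launch(server_name="0.0.0.0", server_port=7860)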
src/utils.py
DELETED
@@ -1,302 +0,0 @@
"""
Enhanced utility functions for the Gemini AI Agent.
Includes advanced text processing and accuracy improvements.
"""

import re
import logging
from typing import Optional, Tuple, List
import unicodedata

logger = logging.getLogger(__name__)


def sanitize_input(text: str) -> str:
    """
    Enhanced input sanitization with better text processing.

    Args:
        text: Raw input text

    Returns:
        str: Sanitized and normalized text
    """
    if not isinstance(text, str):
        return str(text)

    # Normalize unicode characters
    text = unicodedata.normalize('NFKD', text)

    # Remove excessive whitespace while preserving structure
    text = re.sub(r'\s+', ' ', text.strip())

    # Clean up common formatting issues
    text = text.replace('\\n', '\n').replace('\\t', '\t')

    # Remove zero-width characters
    text = re.sub(r'[\u200b-\u200d\ufeff]', '', text)

    return text


def format_response(response: str) -> str:
    """
    Enhanced response formatting for better readability and accuracy.

    Args:
        response: Raw response from the AI model

    Returns:
        str: Enhanced formatted response
    """
    if not response:
        return "No response generated."

    # Clean up the response
    response = response.strip()

    # Fix spacing issues
    response = re.sub(r'\.([A-Z])', r'. \1', response)
    response = re.sub(r'([.!?])\s*([A-Z])', r'\1 \2', response)

    # Clean up multiple newlines but preserve intentional formatting
    response = re.sub(r'\n\s*\n\s*\n+', '\n\n', response)

    # Ensure proper spacing around punctuation
    response = re.sub(r'([,;:])\s*', r'\1 ', response)
    response = re.sub(r'\s+([,;:.])', r'\1', response)

    # Fix common numerical formatting
    response = re.sub(r'(\d)\s*,\s*(\d{3})', r'\1,\2', response)  # Fix thousands separators

    # Ensure proper capitalization after periods
    def capitalize_after_period(match):
        return match.group(1) + match.group(2).upper()

    response = re.sub(r'(\. )([a-z])', capitalize_after_period, response)

    return response


def extract_key_information(text: str, question_type: str) -> dict:
    """
    Extract key information based on question type for accuracy verification.

    Args:
        text: Text to analyze
        question_type: Type of question being answered

    Returns:
        dict: Extracted key information
    """
    extracted = {'type': question_type}

    if question_type == 'quantitative':
        # Extract numbers
        numbers = re.findall(r'\b\d+(?:,\d{3})*(?:\.\d+)?\b', text)
        extracted['numbers'] = numbers

    elif question_type == 'temporal':
        # Extract dates and years
        years = re.findall(r'\b(19|20)\d{2}\b', text)
        dates = re.findall(r'\b\d{1,2}[/-]\d{1,2}[/-]\d{2,4}\b', text)
        extracted['years'] = years
        extracted['dates'] = dates

    elif question_type == 'biographical':
        # Extract names (basic pattern)
        names = re.findall(r'\b[A-Z][a-z]+ [A-Z][a-z]+\b', text)
        extracted['names'] = names

    return extracted


def validate_question(question: str) -> Tuple[bool, Optional[str]]:
    """
    Enhanced question validation with specific checks.

    Args:
        question: The question to validate

    Returns:
        tuple: (is_valid, error_message)
    """
    if not question or not question.strip():
        return False, "Question cannot be empty."

    clean_question = question.strip()

    # Length checks
    if len(clean_question) < 3:
        return False, "Question is too short. Please provide more detail."

    if len(clean_question) > 8000:  # Increased limit for complex questions
        return False, "Question is too long. Please keep it under 8000 characters."

    # Check for question-like structure
    question_indicators = ['?', 'what', 'how', 'when', 'where', 'who', 'why', 'which', 'can', 'is', 'are', 'do', 'does']
    has_question_indicator = any(indicator in clean_question.lower() for indicator in question_indicators)

    if not has_question_indicator:
        return False, "Please formulate your input as a clear question."

    # Check for potentially problematic content
    problematic_patterns = [
        r'<script.*?>.*?</script>',  # Script tags
        r'javascript:',  # JavaScript protocol
        r'data:text/html',  # Data URLs
    ]

    for pattern in problematic_patterns:
        if re.search(pattern, clean_question, re.IGNORECASE):
            return False, "Question contains potentially unsafe content."

    return True, None


def extract_confidence_indicators(text: str) -> dict:
    """
    Extract confidence indicators from response text.

    Args:
        text: Response text to analyze

    Returns:
        dict: Confidence analysis
    """
    # High confidence indicators
    high_confidence = [
        'according to', 'research shows', 'studies indicate', 'data shows',
        'confirmed', 'established', 'proven', 'documented', 'verified'
    ]

    # Low confidence indicators
    low_confidence = [
        'i think', 'i believe', 'probably', 'might be', 'could be',
        'it seems', 'appears to be', 'likely', 'possibly', 'perhaps',
        'uncertain', 'unclear', 'ambiguous'
    ]

    # Hedging language
    hedging = [
        'somewhat', 'rather', 'quite', 'fairly', 'relatively',
        'generally', 'typically', 'usually', 'often'
    ]

    text_lower = text.lower()

    high_count = sum(1 for phrase in high_confidence if phrase in text_lower)
    low_count = sum(1 for phrase in low_confidence if phrase in text_lower)
    hedge_count = sum(1 for phrase in hedging if phrase in text_lower)

    # Calculate confidence score
    confidence_score = max(0.1, min(1.0, (high_count * 0.3 + (1 - low_count * 0.2) + (1 - hedge_count * 0.1))))

    return {
        'confidence_score': confidence_score,
        'high_confidence_indicators': high_count,
        'low_confidence_indicators': low_count,
        'hedging_indicators': hedge_count,
        'assessment': 'high' if confidence_score > 0.7 else 'medium' if confidence_score > 0.4 else 'low'
    }


def format_error_message(error: Exception) -> str:
    """
    Enhanced error message formatting with more specific guidance.

    Args:
        error: The exception that occurred

    Returns:
        str: User-friendly error message with guidance
    """
    error_type = type(error).__name__
    error_str = str(error)

    # Enhanced error messages with actionable guidance
    error_messages = {
        'ConnectionError': 'Unable to connect to the AI service. Please check your internet connection and try again.',
        'TimeoutError': 'The request timed out. This often happens with very complex questions. Try breaking your question into smaller parts.',
        'ValueError': 'There was an issue with the input format. Please check that your question is clearly stated and try again.',
        'KeyError': 'Configuration error detected. Please verify that your API key is correctly set.',
        'PermissionError': 'Access denied. Please check that your API key has the necessary permissions.',
        'RateLimitError': 'Rate limit exceeded. Please wait a moment before trying again.',
        'QuotaExceededError': 'API quota exceeded. Please check your account limits.',
        'AuthenticationError': 'Authentication failed. Please verify your API key is correct.'
    }

    # Check for specific error patterns
    if 'quota' in error_str.lower():
        user_message = 'API quota exceeded. Please check your account limits or try again later.'
    elif 'rate limit' in error_str.lower():
        user_message = 'Rate limit exceeded. Please wait a moment before trying again.'
    elif 'authentication' in error_str.lower() or 'api key' in error_str.lower():
        user_message = 'Authentication error. Please verify your API key is correct and has proper permissions.'
    else:
        user_message = error_messages.get(error_type, f"An unexpected error occurred: {error_str}")

    logger.error(f"Enhanced error formatted for user: {error_type} - {error_str}")
    return user_message


def optimize_prompt_for_accuracy(prompt: str, question_type: str) -> str:
    """
    Optimize prompts based on question types for better accuracy.

    Args:
        prompt: Original prompt
        question_type: Type of question

    Returns:
        str: Optimized prompt
    """
    accuracy_boosters = {
        'quantitative': "Be precise with numbers. If counting items, list them systematically. Double-check calculations.",
        'temporal': "Provide specific dates and timeframes. If uncertain about exact dates, clearly state the closest known timeframe.",
        'biographical': "Provide full names and relevant context. Verify the person matches the question context.",
        'definitional': "Give clear, complete definitions. Include essential characteristics and avoid ambiguity.",
        'comparative': "Make systematic comparisons using specific criteria. Highlight key differences clearly.",
        'mathematical': "Show all calculation steps. Verify arithmetic. State assumptions clearly."
    }

    booster = accuracy_boosters.get(question_type, "Provide accurate, well-reasoned responses with specific details.")

    return f"{prompt}\n\nACCURACY FOCUS: {booster}"


def truncate_text(text: str, max_length: int = 2000) -> str:
    """
    Enhanced text truncation with better word boundary preservation.

    Args:
        text: Text to truncate
        max_length: Maximum length allowed

    Returns:
        str: Truncated text
    """
    if len(text) <= max_length:
        return text

    # Find the last sentence end before max_length
    truncated = text[:max_length]

    # Look for sentence endings
    sentence_endings = ['. ', '! ', '? ']
    last_sentence_end = -1

    for ending in sentence_endings:
        pos = truncated.rfind(ending)
        if pos > last_sentence_end and pos > max_length * 0.7:  # At least 70% of desired length
            last_sentence_end = pos + len(ending)

    if last_sentence_end > 0:
        return truncated[:last_sentence_end].strip()

    # Fall back to word boundary
    last_space = truncated.rfind(' ')
    if last_space > max_length * 0.8:  # At least 80% of desired length
        return truncated[:last_space] + "..."
    else:
        return truncated + "..."
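Note: taken together, the deleted helpers formed a small pre- and post-processing pipeline around the model call. A minimal sketch of how they composed is shown below; the ask() wrapper, the agent.generate_content() call, and the hard-coded question_type are assumptions for illustration, not part of the removed module.

    # Illustrative composition of the deleted helpers (sketch only)
    def ask(agent, raw_question: str) -> str:
        question = sanitize_input(raw_question)            # normalize unicode and whitespace
        is_valid, error = validate_question(question)      # reject empty, overlong, or unsafe input
        if not is_valid:
            return error
        prompt = optimize_prompt_for_accuracy(question, "quantitative")  # question_type assumed
        raw_answer = agent.generate_content(prompt)        # model call via the fallback client
        return truncate_text(format_response(raw_answer), max_length=2000)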