import asyncio
import logging

from services.gemini_client import get_gemini_client
|
|
logger = logging.getLogger(__name__)
|
|
|
|
async def get_chatbot_response(user_text: str) -> str:
    """
    Generate a chatbot response for *user_text* using the Gemini API.

    Args:
        user_text: User input text.

    Returns:
        Chatbot response text. On any API failure — or an empty model
        response — a canned fallback string echoing the input is returned
        instead; this function never raises.
    """
    try:
        client = get_gemini_client()

        logger.info("Generating chatbot response for: '%s'", user_text)

        system_prompt = """You are a helpful, friendly AI assistant.
        Respond concisely and naturally to user queries.
        Keep responses brief (1-2 sentences) for voice interaction."""

        # generate_content is a blocking (synchronous) SDK call; run it in a
        # worker thread so the event loop is not stalled on network I/O.
        response = await asyncio.to_thread(
            client.models.generate_content,
            model="gemini-2.5-flash",
            contents=[
                {"role": "user", "parts": [{"text": system_prompt}]},
                {"role": "user", "parts": [{"text": user_text}]},
            ],
        )

        # response.text can be None (e.g. blocked or empty candidates);
        # treat that as a failure rather than returning None from `-> str`.
        response_text = response.text
        if not response_text:
            raise ValueError("empty response from model")

        logger.info("✓ Response generated: '%s'", response_text)
        return response_text

    except Exception:
        # Best-effort fallback for voice UX: never propagate, always answer.
        # logger.exception records the traceback alongside the message.
        logger.exception("✗ Chatbot response failed")
        return f"I understood you said: '{user_text}'. Could you tell me more?"