Spaces:
Paused
Paused
| """ | |
| Gemini AI chat integration | |
| """ | |
| import requests | |
| from typing import List, Tuple | |
| from config.settings import GEMINI_API_KEY | |
# Module-level cache of the latest conversation, stored as
# [user_msg, assistant_msg] pairs; written by chat_with_gemini and
# reset by clear_chat.
_chat_history: List[List[str]] = []
def chat_with_gemini(message: str, history: List[List[str]]) -> Tuple[str, List[List[str]]]:
    """Send *message* to the Gemini API and return the reply plus updated history.

    Args:
        message: The new user message.
        history: Prior turns as [user_msg, assistant_msg] pairs (Gradio
            chatbot format); empty/None entries are skipped when rebuilding
            the request context.

    Returns:
        (bot_response, history). On any failure a human-readable error
        string is returned as the response and history is left unchanged.
    """
    global _chat_history

    if not GEMINI_API_KEY:
        return "Please set your GEMINI_API_KEY in the Hugging Face secrets settings.", history

    try:
        # Gemini API endpoint - Updated to use gemini-1.5-flash
        url = (
            "https://generativelanguage.googleapis.com/v1beta/models/"
            f"gemini-1.5-flash:generateContent?key={GEMINI_API_KEY}"
        )

        # Rebuild the full conversation so the model sees prior context.
        contents = []
        for user_msg, assistant_msg in history:
            if user_msg:
                contents.append({"role": "user", "parts": [{"text": user_msg}]})
            if assistant_msg:
                contents.append({"role": "model", "parts": [{"text": assistant_msg}]})
        # Current user turn goes last.
        contents.append({"role": "user", "parts": [{"text": message}]})

        data = {
            "contents": contents,
            "generationConfig": {
                "temperature": 0.7,
                "topK": 40,
                "topP": 0.95,
                "maxOutputTokens": 1024,
            },
            "safetySettings": [
                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            ],
        }

        headers = {"Content-Type": "application/json"}
        # FIX: timeout added — without one, a stalled connection blocks the
        # calling UI thread forever (requests has no default timeout).
        response = requests.post(url, headers=headers, json=data, timeout=30)
        response.raise_for_status()

        result = response.json()

        # Walk the response defensively; any missing piece (e.g. a
        # safety-blocked candidate with no "content") falls through to the
        # generic failure message below.
        candidates = result.get("candidates") or []
        if candidates:
            parts = candidates[0].get("content", {}).get("parts") or []
            if parts and "text" in parts[0]:
                bot_response = parts[0]["text"]
                # Persist the new turn before returning.
                history.append([message, bot_response])
                _chat_history = history
                return bot_response, history

        return "I couldn't generate a response. Please try again.", history

    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 403:
            return "API key is invalid or doesn't have access to Gemini. Please check your GEMINI_API_KEY.", history
        return f"API Error: {e.response.status_code} - {e.response.text}", history
    except Exception as e:
        return f"Error: {str(e)}", history
def clear_chat():
    """Reset the stored conversation.

    Returns:
        (None, []): the pair a Gradio callback uses to blank the message
        textbox and the chatbot component in one shot.
    """
    global _chat_history
    _chat_history = []
    return None, []