Spaces: Runtime error
Create app.py
app.py ADDED
@@ -0,0 +1,143 @@
import os
import asyncio

from dotenv import load_dotenv
import chainlit as cl
import google.generativeai as genai

# Load environment variables from .env (if any)
load_dotenv()

# Model selection (change if needed)
MODEL_NAME = "gemini-2.0-flash"

# Default system prompt
SYSTEM_PROMPT = "You are a helpful, friendly AI assistant. Provide clear and concise responses."


@cl.on_chat_start
async def start():
    """
    Initialize the chat session with the Google Generative AI (Gemini) client.
    Stores the model, chat object, and message history in the user session.
    """
    try:
        api_key = os.environ.get("GOOGLE_API_KEY")
        if not api_key:
            raise ValueError("GOOGLE_API_KEY environment variable is not set")

        # Configure the google-generativeai client
        genai.configure(api_key=api_key)

        # Create the model object. Gemini chat history only accepts the
        # "user" and "model" roles, so the system prompt must be passed via
        # system_instruction rather than as a {"role": "system"} history entry.
        model = genai.GenerativeModel(MODEL_NAME, system_instruction=SYSTEM_PROMPT)

        # Keep a parallel message history for our own bookkeeping
        message_history = [{"role": "system", "content": SYSTEM_PROMPT}]

        # Start a persistent chat object once and reuse it for the session
        chat = model.start_chat(history=[])

        # Save everything into the Chainlit user session
        cl.user_session.set("genai_model", model)
        cl.user_session.set("genai_chat", chat)
        cl.user_session.set("message_history", message_history)

        await cl.Message(
            content="Hello! I'm your Gemini-powered assistant. How can I help you today?"
        ).send()

    except ValueError as e:
        await cl.Message(
            content=f"⚠️ Configuration Error: {e}\nPlease make sure GOOGLE_API_KEY is set in the environment variables."
        ).send()
    except Exception as e:
        await cl.Message(content=f"⚠️ Error initializing Gemini client: {e}").send()


@cl.on_chat_end
async def on_chat_end():
    """Clean up session state when the chat session ends."""
    for key in ("genai_model", "genai_chat", "message_history"):
        cl.user_session.set(key, None)


async def _call_chat_send_message(chat, user_text):
    """
    Run the blocking chat.send_message call in a worker thread.
    With stream=True the Gemini client returns an iterable of response chunks.
    """
    return await asyncio.to_thread(chat.send_message, user_text, stream=True)


async def handle_error(error: Exception) -> str:
    """Format an exception into a user-facing error message."""
    if "session" in str(error).lower():
        return "⚠️ Session error occurred. Please refresh the page and try again."
    return f"⚠️ An error occurred: {error}"


@cl.on_message
async def main(user_message: cl.Message):
    """
    Process user messages and generate AI responses using Gemini.
    Starts the blocking Gemini client call in a background thread, then updates
    the Chainlit message progressively as response chunks arrive.
    """
    # Prepare a response message slot so it can be updated progressively
    response_message = cl.Message(content="")
    await response_message.send()

    try:
        # Retrieve session data
        chat = cl.user_session.get("genai_chat")
        message_history = cl.user_session.get("message_history")

        if chat is None or message_history is None:
            raise ValueError("Session not initialized properly. Please refresh the page.")

        # Record the user message in the bookkeeping history
        user_text = user_message.content or ""
        message_history.append({"role": "user", "content": user_text})
        cl.user_session.set("message_history", message_history)

        # Start the request in a background thread; with stream=True the
        # returned object is an iterable of response chunks
        response_stream = await _call_chat_send_message(chat, user_text)

        # Iterate through the chunks and update the message progressively
        # (each iteration may briefly block on network I/O)
        full_response = ""
        try:
            for chunk in response_stream:
                # Each chunk normally exposes a .text attribute (defensive)
                text_piece = getattr(chunk, "text", None)
                if text_piece is None:
                    # Fall back to a generic string representation
                    text_piece = str(chunk)
                # Accumulate and push the partial response to the UI
                full_response += text_piece
                response_message.content = full_response
                await response_message.update()
        except TypeError:
            # If the response is not iterable, coerce it to a string
            full_response = str(response_stream)
            response_message.content = full_response
            await response_message.update()

        # Record the assistant response in the bookkeeping history
        message_history.append({"role": "assistant", "content": full_response})
        cl.user_session.set("message_history", message_history)

    except Exception as e:
        error_message = await handle_error(e)
        # Replace the placeholder message with the error text
        try:
            response_message.content = error_message
            await response_message.update()
        except Exception:
            # If updating fails, send a fresh message instead
            await cl.Message(content=error_message).send()