AroojImtiaz committed on
Commit
513fbaf
·
verified ·
1 Parent(s): ba6cd46

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -30
app.py CHANGED
@@ -1,58 +1,126 @@
1
  # app.py
2
  import os
 
3
  import asyncio
 
 
4
  from dotenv import load_dotenv
 
 
5
  import chainlit as cl
6
  import google.generativeai as genai
7
 
8
- # Load environment variables from .env
9
  load_dotenv()
10
- API_KEY = os.getenv("GOOGLE_API_KEY")
11
- MODEL_NAME = os.getenv("MODEL_NAME", "gemini-2.0-flash")
12
 
 
 
13
  if not API_KEY:
14
- raise RuntimeError("GOOGLE_API_KEY not set in environment or .env")
15
 
16
- # Configure Google Generative AI
 
 
 
 
17
  genai.configure(api_key=API_KEY)
18
 
19
- # Create model once at import time
20
- model = genai.GenerativeModel(MODEL_NAME)
 
 
 
 
 
 
 
 
21
 
22
- # Optional system prompt
23
- SYSTEM_PROMPT = "You are a helpful, friendly AI assistant. Provide clear and concise responses."
 
 
 
 
24
 
 
25
  @cl.on_chat_start
26
- async def start():
27
- # Initialize per-session message history if you want
28
  cl.user_session.set("message_history", [{"role": "system", "content": SYSTEM_PROMPT}])
29
- await cl.Message(content="Hello! I'm your Gemini-powered assistant. Ask me anything!").send()
30
 
 
31
  @cl.on_message
32
  async def handle_message(message: cl.Message):
33
- text = (message.content or "").strip()
34
- if not text:
35
- await cl.Message(content="Please type something to ask.").send()
36
  return
37
 
38
- # Get session history (optional: used only for bookkeeping)
39
- message_history = cl.user_session.get("message_history", [])
40
- message_history.append({"role": "user", "content": text})
41
- cl.user_session.set("message_history", message_history)
 
 
 
 
 
42
 
43
- # Use generate_content (simple single-response call).
44
- # Run the blocking call in a thread to avoid blocking Chainlit's event loop.
45
  try:
46
- resp = await asyncio.to_thread(model.generate_content, text)
47
- reply = getattr(resp, "text", None) or str(resp)
 
 
 
 
48
  except Exception as e:
49
- reply = f"[Error calling Gemini API] {e}"
 
 
 
 
 
 
 
 
50
 
51
- # send reply and add to history
52
- await cl.Message(content=reply).send()
53
- message_history.append({"role": "assistant", "content": reply})
54
- cl.user_session.set("message_history", message_history)
55
 
56
- # NOTE: Do not call cl.run() if you will use the CLI `chainlit run app.py`.
 
 
 
57
  if __name__ == "__main__":
58
- cl.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8000)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # app.py
2
  import os
3
+ import sys
4
  import asyncio
5
+ import subprocess
6
+ import shutil
7
  from dotenv import load_dotenv
8
+
9
+ # third-party imports (must be in requirements.txt)
10
  import chainlit as cl
11
  import google.generativeai as genai
12
 
13
# Load environment variables from .env if present (no-op when the file is absent)
load_dotenv()

# Required env var: GOOGLE_API_KEY — fail fast at import time so a missing key
# surfaces as a clear configuration error rather than a later API failure.
API_KEY = os.getenv("GOOGLE_API_KEY")
if not API_KEY:
    raise RuntimeError("GOOGLE_API_KEY is not set. Add it to the environment or .env file.")

# Optional: choose model via env, default to gemini-2.0-flash
MODEL_NAME = os.getenv("MODEL_NAME", "gemini-2.0-flash")
# Port for the Chainlit server when this script is launched directly.
PORT = int(os.getenv("PORT", 8000))

# Configure the Google Generative client
genai.configure(api_key=API_KEY)

# Instantiate model at import time (so handlers can reference it)
try:
    model = genai.GenerativeModel(MODEL_NAME)
except Exception as e:
    # If model initialization fails at import, keep model None but register handlers;
    # the helper that uses it raises a clear RuntimeError when model is None.
    model = None
    print(f"[Warning] Failed to initialize GenerativeModel('{MODEL_NAME}') at import time: {e}", file=sys.stderr)

# Simple system prompt used for per-session history (optional)
SYSTEM_PROMPT = "You are a helpful, friendly AI assistant. Provide clear and concise answers."
38
 
39
# Helper to run blocking model.generate_content in a background thread
async def _generate_content_in_thread(prompt_text: str):
    """Call ``model.generate_content(prompt_text)`` without blocking the event loop.

    Raises:
        RuntimeError: if the module-level model failed to initialize at import.
    """
    active = model
    if active is None:
        raise RuntimeError("Generative model not initialized.")
    # asyncio.to_thread offloads the synchronous SDK call to a worker thread.
    return await asyncio.to_thread(active.generate_content, prompt_text)
45
 
46
# Register a welcome message per new chat session
@cl.on_chat_start
async def on_chat_start():
    """Seed the per-session message history with the system prompt and greet the user."""
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": SYSTEM_PROMPT}],
    )
    greeting = "Hello! I'm your Gemini-powered assistant. Ask me anything."
    await cl.Message(content=greeting).send()
52
 
53
# Main message handler
@cl.on_message
async def handle_message(message: cl.Message):
    """Answer one user message via Gemini, updating a placeholder reply in place."""
    prompt = (message.content or "").strip()
    if not prompt:
        await cl.Message(content="Please type a question or prompt.").send()
        return

    # Per-session bookkeeping only; the history is not forwarded to the model
    # in this simple generate_content flow, but is useful for UI/logging.
    session_log = cl.user_session.get("message_history", [])
    session_log.append({"role": "user", "content": prompt})
    cl.user_session.set("message_history", session_log)

    # Placeholder message shown while generation runs; overwritten below.
    placeholder = cl.Message(content="(Thinking...)")
    await placeholder.send()

    try:
        # Blocking generation runs in a worker thread so the event loop stays free.
        response = await _generate_content_in_thread(prompt)
        # Defensive: many responses expose .text; fall back to str().
        answer = getattr(response, "text", None) or str(response)
    except Exception as exc:
        answer = f"[Error calling Gemini API] {exc}"

    # Prefer editing the placeholder; fall back to a fresh message on failure.
    try:
        placeholder.content = answer
        await placeholder.update()
    except Exception:
        await cl.Message(content=answer).send()

    # Save assistant reply to the session history.
    session_log.append({"role": "assistant", "content": answer})
    cl.user_session.set("message_history", session_log)
 
92
 
93
+
94
# When launched directly (python app.py), start the Chainlit CLI as a subprocess.
# This avoids calling cl.run() directly and is compatible with hosting environments
# (like Hugging Face Spaces) that run the script.
if __name__ == "__main__":
    # Prefer the chainlit CLI if it is on PATH; otherwise fall back to
    # `python -m chainlit` with the same interpreter so module paths match.
    cli_path = shutil.which("chainlit")
    if cli_path is None:
        cmd = [sys.executable, "-m", "chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", str(PORT)]
    else:
        cmd = [cli_path, "run", "app.py", "--host", "0.0.0.0", "--port", str(PORT)]

    print("Starting Chainlit via CLI command:", " ".join(cmd), file=sys.stderr)

    # Launch the CLI as a child process; subprocess.call blocks until it exits,
    # and SIGINT/SIGTERM delivered to this process behave as for a normal server run.
    try:
        return_code = subprocess.call(cmd)
    except FileNotFoundError:
        # Neither the chainlit CLI nor python -m chainlit could be started.
        print("[Error] chainlit CLI not found and python -m chainlit failed.", file=sys.stderr)
        print("Ensure chainlit is installed in the environment (pip install chainlit).", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"[Error] Failed to launch Chainlit CLI: {e}", file=sys.stderr)
        sys.exit(1)

    # Propagate the child's exit status so supervisors see failures.
    if return_code != 0:
        print(f"[chainlit exit] process returned code: {return_code}", file=sys.stderr)
    sys.exit(return_code)