charesz committed on
Commit
9421310
·
verified ·
1 Parent(s): a352e2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +145 -108
app.py CHANGED
@@ -1,131 +1,168 @@
1
  import streamlit as st
2
  import google.generativeai as genai
 
 
 
 
 
 
 
 
 
 
 
3
 
4
- # -------------------
5
- # API Key Setup
6
- # -------------------
7
- gemini_api_key = st.secrets.get("GEN_API_KEY", "")
8
-
9
- # -------------------
10
- # Page Configuration
11
- # -------------------
12
- st.set_page_config(page_title="Your AI Buddy", layout="wide")
13
- st.title("💡 Need answers? Just type below!")
14
-
15
- # -------------------
16
- # Gemini Setup
17
- # -------------------
18
- if not gemini_api_key:
19
- st.error("⚠️ Please set your 'GEN_API_KEY' in Streamlit secrets.")
20
- st.stop()
21
-
22
- genai.configure(api_key=gemini_api_key)
23
 
24
- # Fetch available Gemini models
25
- available_models = [
26
- m.name for m in genai.list_models()
27
- if "generateContent" in m.supported_generation_methods
28
- ]
 
 
 
 
 
 
 
29
 
30
- if not available_models:
31
- st.error("⚠️ No Gemini models available for your API key.")
32
- st.stop()
33
 
34
- # Reset session if old model is invalid
35
- if "model" in st.session_state and st.session_state["model"] not in available_models:
36
- del st.session_state["model"]
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
- model = st.sidebar.selectbox("Model", available_models, index=0)
39
 
40
- # Initialize Gemini chat if needed
41
- if "gemini_chat" not in st.session_state or st.session_state.get("model") != model:
42
- st.session_state.model = model
43
  try:
44
- gemini_model = genai.GenerativeModel(model)
45
- st.session_state.gemini_chat = gemini_model.start_chat(history=[])
 
 
 
 
 
 
 
46
  except Exception as e:
47
- st.error(f"⚠️ Could not initialize Gemini model: {e}")
48
  st.stop()
49
 
50
- # -------------------
51
- # Sidebar Options
52
- # -------------------
53
- system_prompt = st.sidebar.text_area(
54
- "System Prompt",
55
- "You are a helpful AI assistant. Provide concise and accurate answers."
 
56
  )
57
 
58
- if "messages" not in st.session_state:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  st.session_state.messages = []
 
60
 
61
- # Reset conversation button
62
- if st.sidebar.button("Reset Conversation"):
63
- st.session_state.messages = []
64
- gemini_model = genai.GenerativeModel(model)
65
- st.session_state.gemini_chat = gemini_model.start_chat(history=[])
66
- st.rerun()
67
-
68
- # -------------------
69
- # Custom Chat Bubble Styling
70
- # -------------------
71
- st.markdown("""
72
- <style>
73
- /* General chat bubble styling */
74
- .stChatMessage > div {
75
- border-radius: 15px;
76
- padding: 10px 14px;
77
- margin: 6px 0;
78
- max-width: 80%;
79
- font-size: 15px;
80
- }
81
-
82
- /* User messages (right side) */
83
- .stChatMessage[data-testid="stChatMessage-user"] > div {
84
- background-color: #d1e9ff;
85
- color: #0a2f5c;
86
- border: 1px solid #a3cfff;
87
- margin-left: auto;
88
- text-align: right;
89
- }
90
-
91
- /* Assistant messages (left side) */
92
- .stChatMessage[data-testid="stChatMessage-assistant"] > div {
93
- background-color: #f1f1f1;
94
- color: #1a1a1a;
95
- border: 1px solid #ddd;
96
- margin-right: auto;
97
- text-align: left;
98
- }
99
-
100
- /* Avatars */
101
- .stChatMessage [data-testid="stChatAvatar"] {
102
- border-radius: 50%;
103
- width: 32px;
104
- height: 32px;
105
- font-size: 18px;
106
- background-color: #ffffff;
107
- display: flex;
108
- align-items: center;
109
- justify-content: center;
110
- box-shadow: 0 1px 3px rgba(0,0,0,0.2);
111
- }
112
- </style>
113
- """, unsafe_allow_html=True)
114
-
115
- # -------------------
116
- # Display Chat Messages
117
- # -------------------
118
  for msg in st.session_state.messages:
119
  with st.chat_message(msg["role"]):
120
  st.markdown(msg["content"])
121
 
122
- # -------------------
123
- # User Input
124
- # -------------------
125
- if user_input := st.chat_input("Type your message..."):
126
- # Show user message
127
- st.chat_message("user").markdown(user_input)
128
  st.session_state.messages.append({"role": "user", "content": user_input})
 
 
129
 
 
130
  try:
131
- with st.spinner("Gemini is thinking..."):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import google.generativeai as genai
# NOTE(review): `google.generativeai.types` exposes GenerationConfig, not
# GenerateContentConfig (that class lives in the newer `google-genai` SDK) —
# verify this import is actually usable where it is consumed.
from google.generativeai import types

# --- CONFIGURATION ---
# Task 3: Use Case Adaptation (Academic Tutor)
# App-wide constants: the page/browser title and the default persona that is
# injected as the model's system instruction.
APP_TITLE = "🧠 Academic Tutor Assistant powered by Gemini"
DEFAULT_SYSTEM_PROMPT = (
    "You are an expert **Academic Tutor**. Your goal is to help the user understand complex "
    "concepts, review essays, solve problems, and provide study guidance. "
    "Respond with clear, structured, and encouraging explanations. "
    "Always ask a follow-up question to encourage deeper learning."
)
# --- HELPER FUNCTIONS ---


@st.cache_resource(show_spinner="Connecting to Gemini...")
def setup_gemini_client(api_key):
    """Configure the google.generativeai SDK once and return the module handle.

    Halts the Streamlit script with an error banner when the key is absent
    or the SDK rejects the configuration; cached so the handshake runs a
    single time per session.
    """
    if not api_key:
        st.error("⚠️ Please set your 'GEN_API_KEY' in Streamlit secrets.")
        st.stop()
    try:
        genai.configure(api_key=api_key)
    except Exception as e:
        st.error(f"⚠️ Error configuring Gemini client: {e}")
        st.stop()
    return genai
 
 
 
 
def get_available_models():
    """Return chat-capable Gemini model names, halting the app on failure.

    Only the listing call is wrapped in try/except. The original nested the
    empty-result `st.stop()` inside the `try`, but Streamlit's StopException
    subclasses Exception, so the handler caught its own stop and displayed a
    misleading "Error fetching models" banner on top of the real one.
    """
    try:
        available_models = [
            m.name for m in genai.list_models()
            if "generateContent" in m.supported_generation_methods
            and ("gemini-2.5" in m.name or "gemini-1.5" in m.name)  # Filter for high-quality models
        ]
    except Exception as e:
        st.error(f"⚠️ Error fetching models: {e}")
        st.stop()
    if not available_models:
        st.error("⚠️ No suitable Gemini models available for your API key.")
        st.stop()
    return available_models
 
 
def init_chat_session(model_name, system_prompt):
    """Create a fresh Gemini chat session obeying *system_prompt*.

    The system prompt must be supplied as the model's ``system_instruction``.
    The original built a ``types.GenerateContentConfig`` (a class from the
    newer ``google-genai`` SDK that does not exist in ``google.generativeai``)
    and passed ``config=`` to ``start_chat``, which accepts no such keyword —
    every initialization raised and the app stopped with an error.
    """
    try:
        # Task 2: Proper system prompt injection — attach it to the model,
        # not the chat call.
        gemini_model = genai.GenerativeModel(
            model_name,
            system_instruction=system_prompt,
        )
        st.session_state.gemini_chat = gemini_model.start_chat(history=[])
    except Exception as e:
        st.error(f"⚠️ Could not initialize Gemini model '{model_name}': {e}")
        st.stop()
 
# --- STREAMLIT PAGE SETUP ---

# 1. Page Configuration (Task 4: UI Improvement)
# set_page_config must run before any other Streamlit output call.
st.set_page_config(page_title=APP_TITLE, layout="wide", initial_sidebar_state="expanded")
st.title(APP_TITLE)
st.markdown(
    "Ask any academic question, paste your homework problem, or submit a paragraph for review. Let's learn together!"
)

# 2. API Key and Client Setup
# Reads the key from Streamlit secrets; setup_gemini_client halts the script
# with an error banner when the key is missing or invalid.
gemini_api_key = st.secrets.get("GEN_API_KEY", "")
gemini_client = setup_gemini_client(gemini_api_key)
+
77
+
# 3. Sidebar Controls (Task 4: Usability Enhancement)
with st.sidebar:
    st.header("⚙️ Settings")

    # Model Selection.
    # list_models() yields fully qualified names such as
    # "models/gemini-2.5-flash"; the original equality test against the bare
    # name never matched, so the preferred default was silently ignored.
    available_models = get_available_models()
    default_index = next(
        (i for i, name in enumerate(available_models)
         if name.endswith("gemini-2.5-flash")),
        0,
    )

    new_model = st.selectbox(
        "Select Model",
        available_models,
        index=default_index,
        key="_selected_model_name"
    )

    # System Prompt (Task 3: Use Case Adaptation)
    system_prompt = st.text_area(
        "Tutor Persona / System Instruction",
        DEFAULT_SYSTEM_PROMPT,
        height=180,
        key="_system_prompt_area"
    )

    # Re-initialize the chat when the user changes model or persona.
    # Guarded on "model" being present so the very first run falls through to
    # the first-run initialization below instead of flashing a bogus
    # "persona updated" banner before any conversation exists.
    config_changed = "model" in st.session_state and (
        st.session_state.model != new_model
        or st.session_state.get("system_prompt") != system_prompt
    )
    if config_changed:
        st.session_state.model = new_model
        st.session_state.system_prompt = system_prompt
        st.session_state.messages = []
        init_chat_session(new_model, system_prompt)
        st.info("Tutor persona updated. Conversation restarted.")

    # Reset Conversation Button (Task 4: Usability Enhancement)
    if st.button("🔄 Reset Conversation"):
        st.session_state.messages = []
        init_chat_session(st.session_state.model, st.session_state.system_prompt)
        # Note: no st.rerun() needed — a button click already triggers a rerun.

# 4. Chat Initialization (first run only)
if "gemini_chat" not in st.session_state:
    st.session_state.model = new_model
    st.session_state.system_prompt = system_prompt
    st.session_state.messages = []
    init_chat_session(new_model, system_prompt)
 
# --- CHAT INTERACTION ---

# 5. Display Chat Messages (Task 4: Visualization)
# Streamlit re-executes the whole script on every interaction, so the full
# transcript stored in session state is replayed here on each rerun.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
 
# 6. User Input (Task 4: Placeholder)
if user_input := st.chat_input("Ask about history, math, or review your essay draft..."):
    # 6a. Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # 6b. Send the message to Gemini and stream the reply.
    try:
        with st.spinner("💡 Tutor is thinking..."):
            # The system prompt lives in the chat session, so only the user's
            # message is sent here.
            response = st.session_state.gemini_chat.send_message(user_input, stream=True)

            # Stream chunks into a placeholder as they arrive (better UX).
            full_response = ""
            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                for chunk in response:
                    # chunk.text raises ValueError on chunks with no text
                    # parts (e.g. safety blocks); skip those instead of
                    # aborting mid-stream with a half-rendered reply.
                    try:
                        full_response += chunk.text
                    except ValueError:
                        continue
                    message_placeholder.markdown(full_response + "▌")
                message_placeholder.markdown(full_response)  # Final rendering

        bot_text = full_response

    except Exception as e:
        bot_text = f"⚠️ The tutor could not respond right now. Please try again. Error: {e}"
        with st.chat_message("assistant"):
            st.markdown(bot_text)

    # 6c. Save the assistant message so reruns replay it.
    st.session_state.messages.append({"role": "assistant", "content": bot_text})

    # Task 4: Removed st.rerun() for better stability.