charesz committed on
Commit
60612b8
·
verified ·
1 Parent(s): 5d2acfe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -132
app.py CHANGED
@@ -1,82 +1,52 @@
1
  import streamlit as st
2
- from huggingface_hub import InferenceClient
3
  import google.generativeai as genai
4
 
5
  # -------------------
6
- # API Keys Setup
7
  # -------------------
8
- huggingface_token = st.secrets.get("HUGGINGFACE_HUB_TOKEN", "")
9
  gemini_api_key = st.secrets.get("GEN_API_KEY", "")
10
 
11
  # -------------------
12
- # Configuration
13
  # -------------------
14
  st.set_page_config(page_title="Your AI Buddy", layout="wide")
15
  st.title("💡 Need answers? Just type below!")
16
 
17
- # List of recommended Hugging Face models (small ones to avoid storage issues)
18
- HF_RECOMMENDED_MODELS = ["mistralai/Mistral-7B-v0.1"]
19
-
20
  # -------------------
21
- # Sidebar Settings
22
  # -------------------
23
- st.sidebar.title("⚙️ Settings")
24
- provider = st.sidebar.selectbox("Provider", ["Hugging Face", "Gemini"])
 
25
 
26
- # -------------------
27
- # Provider Setup
28
- # -------------------
29
- client = None
30
- model = None
31
 
32
- if provider == "Hugging Face":
33
- if not huggingface_token:
34
- st.error("⚠️ Please set your 'HUGGINGFACE_HUB_TOKEN' in Streamlit secrets.")
35
- st.stop()
 
36
 
37
- selected_models = st.sidebar.multiselect(
38
- "Choose HF models",
39
- HF_RECOMMENDED_MODELS,
40
- default=[HF_RECOMMENDED_MODELS[0]]
41
- )
42
- if not selected_models:
43
- st.warning("⚠️ Please select at least one Hugging Face model.")
44
- st.stop()
45
 
46
- client = InferenceClient(token=huggingface_token)
 
 
47
 
48
- elif provider == "Gemini":
49
- if not gemini_api_key:
50
- st.error("⚠️ Please set your 'GEN_API_KEY' in Streamlit secrets.")
51
- st.stop()
52
-
53
- genai.configure(api_key=gemini_api_key)
54
 
55
- # Fetch available models that support generateContent
56
- available_models = [
57
- m.name for m in genai.list_models()
58
- if "generateContent" in m.supported_generation_methods
59
- ]
60
- if not available_models:
61
- st.error("⚠️ No Gemini models available for your API key.")
 
62
  st.stop()
63
 
64
- # Reset session if old model is invalid
65
- if "model" in st.session_state and st.session_state["model"] not in available_models:
66
- del st.session_state["model"]
67
-
68
- model = st.sidebar.selectbox("Model", available_models, index=0)
69
-
70
- # Initialize Gemini chat if model changes or not initialized
71
- if "gemini_chat" not in st.session_state or st.session_state.get("model") != model:
72
- st.session_state.model = model
73
- try:
74
- gemini_model = genai.GenerativeModel(model)
75
- st.session_state.gemini_chat = gemini_model.start_chat(history=[])
76
- except Exception as e:
77
- st.error(f"⚠️ Could not initialize Gemini model: {e}")
78
- st.stop()
79
-
80
  # -------------------
81
  # System Prompt
82
  # -------------------
@@ -94,9 +64,8 @@ if "messages" not in st.session_state:
94
  # Reset conversation button
95
  if st.sidebar.button("Reset Conversation"):
96
  st.session_state.messages = []
97
- if provider == "Gemini" and model:
98
- gemini_model = genai.GenerativeModel(model)
99
- st.session_state.gemini_chat = gemini_model.start_chat(history=[])
100
  st.rerun()
101
 
102
  # -------------------
@@ -110,78 +79,21 @@ for msg in st.session_state.messages:
110
  # User Input
111
  # -------------------
112
  if user_input := st.chat_input("Type your message..."):
 
113
  st.chat_message("user").markdown(user_input)
114
  st.session_state.messages.append({"role": "user", "content": user_input})
115
 
116
- # -------------------
117
- # Hugging Face Logic
118
- # -------------------
119
- if provider == "Hugging Face":
120
- for m in selected_models:
121
- with st.chat_message("assistant"):
122
- message_placeholder = st.empty()
123
- message_placeholder.markdown(f"**{m}** is generating...")
124
-
125
- try:
126
- # Prepare conversation history
127
- conv = "\n".join([
128
- f"{msg['role']}: {msg['content']}"
129
- for msg in st.session_state.messages
130
- ])
131
- prompt_text = f"{system_prompt}\n\n{conv}\nassistant:"
132
-
133
- # Always use text_generation (works for GPT, Bloom, Flan-T5)
134
- resp = client.text_generation(
135
- model=m,
136
- prompt=prompt_text,
137
- max_new_tokens=256,
138
- temperature=0.7,
139
- stop_sequences=["assistant:", "user:"],
140
- stream=False
141
- )
142
-
143
- # Parse response
144
- if isinstance(resp, str):
145
- bot_text = resp
146
- elif isinstance(resp, dict):
147
- bot_text = resp.get("generated_text", str(resp))
148
- elif isinstance(resp, list) and "generated_text" in resp[0]:
149
- bot_text = resp[0]["generated_text"]
150
- else:
151
- bot_text = "⚠️ Unexpected response format."
152
-
153
- # Strip if model repeats the prompt
154
- if bot_text.startswith(prompt_text):
155
- bot_text = bot_text[len(prompt_text):].strip()
156
-
157
- except Exception as e:
158
- bot_text = f"⚠️ Error with **{m}**: {type(e).__name__}: {e}"
159
-
160
- # Display final response
161
- final_response = f"**{m}**\n\n{bot_text}"
162
- message_placeholder.markdown(final_response)
163
-
164
- st.session_state.messages.append(
165
- {"role": "assistant", "content": final_response}
166
- )
167
- st.rerun()
168
-
169
- # -------------------
170
- # Gemini Logic
171
- # -------------------
172
- elif provider == "Gemini":
173
- try:
174
- if user_input.strip():
175
- with st.spinner("Gemini is thinking..."):
176
- resp = st.session_state.gemini_chat.send_message(user_input)
177
- bot_text = resp.text
178
- else:
179
- bot_text = "⚠️ Please enter a message before sending."
180
- except Exception as e:
181
- bot_text = f"⚠️ Gemini could not respond right now. Please try again. ({e})"
182
-
183
- with st.chat_message("assistant"):
184
- st.markdown(bot_text)
185
-
186
- st.session_state.messages.append({"role": "assistant", "content": bot_text})
187
- st.rerun()
 
import streamlit as st
import google.generativeai as genai

# --- Credentials -----------------------------------------------------
# Pull the Gemini API key from Streamlit's secrets store; an empty
# string means the key was never configured (handled further below).
gemini_api_key = st.secrets.get("GEN_API_KEY", "")

# --- Page chrome -----------------------------------------------------
# set_page_config must run before any rendering command.
st.set_page_config(page_title="Your AI Buddy", layout="wide")
st.title("💡 Need answers? Just type below!")
14
 
 
 
 
# -------------------
# Gemini Setup
# -------------------
# Abort early with a clear message when no API key is configured.
if not gemini_api_key:
    st.error("⚠️ Please set your 'GEN_API_KEY' in Streamlit secrets.")
    st.stop()

genai.configure(api_key=gemini_api_key)

# Fetch the available Gemini models once per session. list_models() is
# a network round-trip and Streamlit re-executes this whole script on
# every widget interaction, so the result is memoised in session_state
# instead of being re-fetched on each rerun.
if "available_models" not in st.session_state:
    st.session_state.available_models = [
        m.name for m in genai.list_models()
        if "generateContent" in m.supported_generation_methods
    ]
available_models = st.session_state.available_models

if not available_models:
    st.error("⚠️ No Gemini models available for your API key.")
    st.stop()

# Reset session if the previously selected model is no longer offered.
if "model" in st.session_state and st.session_state["model"] not in available_models:
    del st.session_state["model"]

model = st.sidebar.selectbox("Model", available_models, index=0)

# (Re)initialise the chat session on first run or when the model changed.
if "gemini_chat" not in st.session_state or st.session_state.get("model") != model:
    st.session_state.model = model
    try:
        gemini_model = genai.GenerativeModel(model)
        st.session_state.gemini_chat = gemini_model.start_chat(history=[])
    except Exception as e:
        st.error(f"⚠️ Could not initialize Gemini model: {e}")
        st.stop()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  # -------------------
51
  # System Prompt
52
  # -------------------
 
# Reset conversation button: clear the visible history and start a
# fresh Gemini chat session so old context cannot leak into new answers.
if st.sidebar.button("Reset Conversation"):
    st.session_state.messages = []
    try:
        gemini_model = genai.GenerativeModel(model)
        st.session_state.gemini_chat = gemini_model.start_chat(history=[])
    except Exception as e:
        # Consistent with the setup block above: surface the failure
        # instead of crashing the script with an unhandled exception.
        st.error(f"⚠️ Could not initialize Gemini model: {e}")
        st.stop()
    st.rerun()
 
71
  # -------------------
 
# -------------------
# User Input
# -------------------
if user_input := st.chat_input("Type your message..."):
    # Show the user message and record it in the visible history.
    st.chat_message("user").markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    try:
        with st.spinner("Gemini is thinking..."):
            # The ChatSession already carries prior turns, so the system
            # prompt only needs to be injected on the very first message.
            # Re-sending it on every turn duplicated context in the
            # conversation history and wasted tokens.
            if st.session_state.gemini_chat.history:
                full_input = user_input
            else:
                full_input = f"{system_prompt}\n\nUser: {user_input}"
            resp = st.session_state.gemini_chat.send_message(full_input)
            bot_text = resp.text
    except Exception as e:
        bot_text = f"⚠️ Gemini could not respond right now. Please try again. ({e})"

    with st.chat_message("assistant"):
        st.markdown(bot_text)

    st.session_state.messages.append({"role": "assistant", "content": bot_text})
    st.rerun()