charesz committed on
Commit
a22ecb6
·
verified ·
1 Parent(s): 0cd092a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -129
app.py CHANGED
@@ -1,84 +1,52 @@
1
  import streamlit as st
2
- from huggingface_hub import InferenceClient
3
  import google.generativeai as genai
4
 
5
  # -------------------
6
- # API Keys Setup
7
  # -------------------
8
- huggingface_token = st.secrets.get("HUGGINGFACE_HUB_TOKEN", "")
9
  gemini_api_key = st.secrets.get("GEN_API_KEY", "")
10
 
11
  # -------------------
12
- # Configuration
13
  # -------------------
14
  st.set_page_config(page_title="Your AI Buddy", layout="wide")
15
  st.title("💡 Need answers? Just type below!")
16
 
17
- # List of recommended Hugging Face models (small ones to avoid storage issues)
18
- HF_RECOMMENDED_MODELS = ["google/flan-t5-large"]
19
-
20
- # -------------------
21
- # Sidebar Settings
22
  # -------------------
23
- st.sidebar.title("⚙️ Settings")
24
- provider = st.sidebar.selectbox("Provider", ["Hugging Face", "Gemini"])
25
-
26
  # -------------------
27
- # Provider Setup
28
- # -------------------
29
- client = None
30
- model = None
31
-
32
- if provider == "Hugging Face":
33
- if not huggingface_token:
34
- st.error("⚠️ Please set your 'HUGGINGFACE_HUB_TOKEN' in Streamlit secrets.")
35
- st.stop()
36
 
37
- selected_models = st.sidebar.multiselect(
38
- "Choose HF models",
39
- HF_RECOMMENDED_MODELS,
40
- default=[HF_RECOMMENDED_MODELS[0]]
41
- )
42
 
43
- if not selected_models:
44
- st.warning("⚠️ Please select at least one Hugging Face model.")
45
- st.stop()
 
 
46
 
47
- client = InferenceClient(token=huggingface_token)
 
 
48
 
49
- elif provider == "Gemini":
50
- if not gemini_api_key:
51
- st.error("⚠️ Please set your 'GEN_API_KEY' in Streamlit secrets.")
52
- st.stop()
53
 
54
- genai.configure(api_key=gemini_api_key)
55
 
56
- # Fetch available models that support generateContent
57
- available_models = [
58
- m.name for m in genai.list_models()
59
- if "generateContent" in m.supported_generation_methods
60
- ]
61
-
62
- if not available_models:
63
- st.error("⚠️ No Gemini models available for your API key.")
64
  st.stop()
65
 
66
- # Reset session if old model is invalid
67
- if "model" in st.session_state and st.session_state["model"] not in available_models:
68
- del st.session_state["model"]
69
-
70
- model = st.sidebar.selectbox("Model", available_models, index=0)
71
-
72
- # Initialize Gemini chat if model changes or not initialized
73
- if "gemini_chat" not in st.session_state or st.session_state.get("model") != model:
74
- st.session_state.model = model
75
- try:
76
- gemini_model = genai.GenerativeModel(model)
77
- st.session_state.gemini_chat = gemini_model.start_chat(history=[])
78
- except Exception as e:
79
- st.error(f"⚠️ Could not initialize Gemini model: {e}")
80
- st.stop()
81
-
82
  # -------------------
83
  # System Prompt
84
  # -------------------
@@ -96,9 +64,8 @@ if "messages" not in st.session_state:
96
  # Reset conversation button
97
  if st.sidebar.button("Reset Conversation"):
98
  st.session_state.messages = []
99
- if provider == "Gemini" and model:
100
- gemini_model = genai.GenerativeModel(model)
101
- st.session_state.gemini_chat = gemini_model.start_chat(history=[])
102
  st.rerun()
103
 
104
  # -------------------
@@ -112,73 +79,21 @@ for msg in st.session_state.messages:
112
  # User Input
113
  # -------------------
114
  if user_input := st.chat_input("Type your message..."):
 
115
  st.chat_message("user").markdown(user_input)
116
  st.session_state.messages.append({"role": "user", "content": user_input})
117
 
118
- # -------------------
119
- # Hugging Face Logic
120
- # -------------------
121
- if provider == "Hugging Face":
122
- for m in selected_models:
123
- with st.chat_message("assistant"):
124
- message_placeholder = st.empty()
125
- message_placeholder.markdown(f"**{m}** is generating...")
126
-
127
- try:
128
- # Prepare conversation history
129
- conv = "\n".join([f"{msg['role']}: {msg['content']}" for msg in st.session_state.messages])
130
- prompt_text = f"{system_prompt}\n\n{conv}\nassistant:"
131
-
132
- # Always use text_generation (works for GPT, Bloom, Flan-T5)
133
- resp = client.text_generation(
134
- model=m,
135
- prompt=prompt_text,
136
- max_new_tokens=256,
137
- temperature=0.7,
138
- stop_sequences=["assistant:", "user:"],
139
- stream=False
140
- )
141
-
142
- # Parse response
143
- if isinstance(resp, str):
144
- bot_text = resp
145
- elif isinstance(resp, dict):
146
- bot_text = resp.get("generated_text", str(resp))
147
- elif isinstance(resp, list) and "generated_text" in resp[0]:
148
- bot_text = resp[0]["generated_text"]
149
- else:
150
- bot_text = "⚠️ Unexpected response format."
151
-
152
- # Strip if model repeats the prompt
153
- if bot_text.startswith(prompt_text):
154
- bot_text = bot_text[len(prompt_text):].strip()
155
-
156
- except Exception as e:
157
- bot_text = f"⚠️ Error with **{m}**: {type(e).__name__}: {e}"
158
-
159
- # Display final response
160
- final_response = f"**{m}**\n\n{bot_text}"
161
- message_placeholder.markdown(final_response)
162
- st.session_state.messages.append({"role": "assistant", "content": final_response})
163
- st.rerun()
164
-
165
-
166
- # -------------------
167
- # Gemini Logic
168
- # -------------------
169
- elif provider == "Gemini":
170
- try:
171
- if user_input.strip():
172
- with st.spinner("Gemini is thinking..."):
173
- resp = st.session_state.gemini_chat.send_message(user_input)
174
- bot_text = resp.text
175
- else:
176
- bot_text = "⚠️ Please enter a message before sending."
177
- except Exception as e:
178
- bot_text = f"⚠️ Gemini could not respond right now. Please try again. ({e})"
179
-
180
- with st.chat_message("assistant"):
181
- st.markdown(bot_text)
182
-
183
- st.session_state.messages.append({"role": "assistant", "content": bot_text})
184
- st.rerun()
 
1
import streamlit as st
import google.generativeai as genai

# -------------------
# API Key Setup
# -------------------
# The Gemini API key is read from Streamlit secrets; defaults to "" when unset.
gemini_api_key = st.secrets.get("GEN_API_KEY", "")

# -------------------
# Page Configuration
# -------------------
st.set_page_config(page_title="Your AI Buddy", layout="wide")
st.title("💡 Need answers? Just type below!")

# -------------------
# Gemini Setup
# -------------------
if not gemini_api_key:
    st.error("⚠️ Please set your 'GEN_API_KEY' in Streamlit secrets.")
    st.stop()

genai.configure(api_key=gemini_api_key)


@st.cache_data(show_spinner=False)
def _list_gemini_models() -> list[str]:
    """Return the names of Gemini models that support generateContent.

    Cached with st.cache_data so the network round-trip of
    genai.list_models() happens once, instead of on every Streamlit rerun
    (Streamlit re-executes this whole script on each user interaction).
    """
    return [
        m.name for m in genai.list_models()
        if "generateContent" in m.supported_generation_methods
    ]


# Fetch available Gemini models
available_models = _list_gemini_models()

if not available_models:
    st.error("⚠️ No Gemini models available for your API key.")
    st.stop()

# Reset session if old model is invalid (e.g. removed from the API listing)
if "model" in st.session_state and st.session_state["model"] not in available_models:
    del st.session_state["model"]

model = st.sidebar.selectbox("Model", available_models, index=0)

# Initialize Gemini chat if needed (first run, or the user switched models)
if "gemini_chat" not in st.session_state or st.session_state.get("model") != model:
    st.session_state.model = model
    try:
        gemini_model = genai.GenerativeModel(model)
        st.session_state.gemini_chat = gemini_model.start_chat(history=[])
    except Exception as e:
        # Broad catch is deliberate at this UI boundary: surface any
        # initialization failure (bad model name, quota, network) in the
        # page instead of crashing the app.
        st.error(f"⚠️ Could not initialize Gemini model: {e}")
        st.stop()
49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  # -------------------
51
  # System Prompt
52
  # -------------------
 
64
# Reset conversation button: wipe the transcript and start a fresh
# Gemini chat session for the currently selected model.
if st.sidebar.button("Reset Conversation"):
    st.session_state.messages = []
    st.session_state.gemini_chat = genai.GenerativeModel(model).start_chat(history=[])
    st.rerun()
70
 
71
  # -------------------
 
79
# User Input
# -------------------
if user_input := st.chat_input("Type your message..."):
    # Show user message and record it in the transcript.
    st.chat_message("user").markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    try:
        with st.spinner("Gemini is thinking..."):
            # Send the system prompt only on the FIRST turn of a chat
            # session. The ChatSession keeps its own history, so prepending
            # the prompt to every message (as before) duplicated the
            # instructions into the history each turn, inflating token
            # usage and skewing context.
            if st.session_state.gemini_chat.history:
                full_input = user_input
            else:
                full_input = f"{system_prompt}\n\nUser: {user_input}"
            resp = st.session_state.gemini_chat.send_message(full_input)
            bot_text = resp.text
    except Exception as e:
        # Broad catch is deliberate: show any API failure as a chat
        # message instead of crashing the app.
        bot_text = f"⚠️ Gemini could not respond right now. Please try again. ({e})"

    with st.chat_message("assistant"):
        st.markdown(bot_text)

    st.session_state.messages.append({"role": "assistant", "content": bot_text})
    st.rerun()