charesz commited on
Commit
90a8520
·
verified ·
1 Parent(s): f1048a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -133
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import streamlit as st
2
- from huggingface_hub import InferenceClient
3
- import google.generativeai as genai
 
4
 
5
  # -------------------
6
  # API Keys Setup
@@ -11,11 +12,14 @@ gemini_api_key = st.secrets.get("GEN_API_KEY", "")
11
  # -------------------
12
  # Configuration
13
  # -------------------
14
- st.set_page_config(page_title="Your AI Buddy", layout="wide")
15
- st.title("💡 Need answers? Just type below!")
16
 
17
- # List of recommended Hugging Face models (small ones to avoid storage issues)
18
- HF_RECOMMENDED_MODELS = ["google/flan-t5-large"]
 
 
 
19
 
20
  # -------------------
21
  # Sidebar Settings
@@ -23,164 +27,94 @@ HF_RECOMMENDED_MODELS = ["google/flan-t5-large"]
23
  st.sidebar.title("⚙️ Settings")
24
  provider = st.sidebar.selectbox("Provider", ["Hugging Face", "Gemini"])
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  # -------------------
27
  # Provider Setup
28
  # -------------------
29
- client = None
30
- model = None
31
 
32
  if provider == "Hugging Face":
33
  if not huggingface_token:
34
- st.error("⚠️ Please set your 'HUGGINGFACE_HUB_TOKEN' in Streamlit secrets.")
35
  st.stop()
36
 
37
  selected_models = st.sidebar.multiselect(
38
- "Choose HF models",
39
  HF_RECOMMENDED_MODELS,
40
  default=[HF_RECOMMENDED_MODELS[0]]
41
  )
 
42
  if not selected_models:
43
- st.warning("⚠️ Please select at least one Hugging Face model.")
44
  st.stop()
45
 
46
- client = InferenceClient(token=huggingface_token)
 
 
 
 
 
 
 
 
47
 
48
  elif provider == "Gemini":
49
  if not gemini_api_key:
50
- st.error("⚠️ Please set your 'GEN_API_KEY' in Streamlit secrets.")
51
  st.stop()
52
 
53
- genai.configure(api_key=gemini_api_key)
54
-
55
- # Fetch available models that support generateContent
56
- available_models = [
57
- m.name for m in genai.list_models() if "generateContent" in m.supported_generation_methods
58
- ]
59
-
60
- if not available_models:
61
- st.error("⚠️ No Gemini models available for your API key.")
62
- st.stop()
63
-
64
- # Reset session if old model is invalid
65
- if "model" in st.session_state and st.session_state["model"] not in available_models:
66
- del st.session_state["model"]
67
-
68
- model = st.sidebar.selectbox("Model", available_models, index=0)
69
-
70
- # Initialize Gemini chat if model changes or not initialized
71
- if "gemini_chat" not in st.session_state or st.session_state.get("model") != model:
72
- st.session_state.model = model
73
- try:
74
- gemini_model = genai.GenerativeModel(model)
75
- st.session_state.gemini_chat = gemini_model.start_chat(history=[])
76
- except Exception as e:
77
- st.error(f"⚠️ Could not initialize Gemini model: {e}")
78
- st.stop()
79
-
80
- # -------------------
81
- # System Prompt
82
- # -------------------
83
- system_prompt = st.sidebar.text_area(
84
- "System Prompt",
85
- "You are a helpful AI assistant. Provide concise and accurate answers."
86
- )
87
-
88
- # -------------------
89
- # Chat History State
90
- # -------------------
91
- if "messages" not in st.session_state:
92
- st.session_state.messages = []
93
 
94
- # Reset conversation button
95
- if st.sidebar.button("Reset Conversation"):
96
- st.session_state.messages = []
97
- if provider == "Gemini" and model:
98
- gemini_model = genai.GenerativeModel(model)
99
- st.session_state.gemini_chat = gemini_model.start_chat(history=[])
100
- st.rerun()
101
 
102
  # -------------------
103
- # Display Chat Messages
104
  # -------------------
105
  for msg in st.session_state.messages:
106
- with st.chat_message(msg["role"]):
107
- st.markdown(msg["content"])
 
 
108
 
109
  # -------------------
110
  # User Input
111
  # -------------------
112
  if user_input := st.chat_input("Type your message..."):
 
113
  st.chat_message("user").markdown(user_input)
114
- st.session_state.messages.append({"role": "user", "content": user_input})
115
-
116
- # -------------------
117
- # Hugging Face Logic
118
- # -------------------
119
- if provider == "Hugging Face":
120
- for m in selected_models:
121
- with st.chat_message("assistant"):
122
- message_placeholder = st.empty()
123
- message_placeholder.markdown(f"**{m}** is generating...")
124
 
 
 
 
125
  try:
126
- conv = "\n".join([f"{msg['role']}: {msg['content']}" for msg in st.session_state.messages])
127
-
128
- if "flan-t5" in m or "t5" in m:
129
- # ✅ Use text2text for T5 models
130
- prompt_text = f"{system_prompt}\n\nUser asked:\n{conv}\n\nAnswer:"
131
- resp = client.text2text_generation(
132
- model=m,
133
- inputs=prompt_text,
134
- max_new_tokens=256,
135
- temperature=0.7
136
- )
137
- else:
138
- # ✅ Use text_generation for GPT-style models
139
- stop_sequences = ["assistant:", "user:"]
140
- prompt_text = f"{system_prompt}\n\n{conv}\nassistant:"
141
- resp = client.text_generation(
142
- model=m,
143
- prompt=prompt_text,
144
- max_new_tokens=256,
145
- temperature=0.7,
146
- stop_sequences=stop_sequences,
147
- stream=False
148
- )
149
-
150
- # Parse response safely
151
- if isinstance(resp, str):
152
- bot_text = resp
153
- elif isinstance(resp, dict) and "generated_text" in resp:
154
- bot_text = resp["generated_text"]
155
- elif isinstance(resp, list) and resp and "generated_text" in resp[0]:
156
- bot_text = resp[0]["generated_text"]
157
- else:
158
- bot_text = "⚠️ Model returned an unexpected format."
159
-
160
  except Exception as e:
161
- bot_text = f"⚠️ Error with **{m}**: {type(e).__name__}: {e}"
162
-
163
- final_response = f"**{m}**\n\n{bot_text}"
164
- message_placeholder.markdown(final_response)
165
- st.session_state.messages.append({"role": "assistant", "content": final_response})
166
- st.rerun()
167
-
168
- # -------------------
169
- # Gemini Logic
170
- # -------------------
171
- elif provider == "Gemini":
172
- try:
173
- if user_input.strip():
174
- with st.spinner("Gemini is thinking..."):
175
- resp = st.session_state.gemini_chat.send_message(user_input)
176
- bot_text = resp.text
177
- else:
178
- bot_text = "⚠️ Please enter a message before sending."
179
- except Exception as e:
180
- bot_text = f"⚠️ Gemini could not respond right now. Please try again. ({e})"
181
-
182
- with st.chat_message("assistant"):
183
- st.markdown(bot_text)
184
-
185
- st.session_state.messages.append({"role": "assistant", "content": bot_text})
186
- st.rerun()
 
1
  import streamlit as st
2
+ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
3
+ from langchain_huggingface import HuggingFaceEndpoint
4
+ from langchain_google_genai import ChatGoogleGenerativeAI
5
 
6
  # -------------------
7
  # API Keys Setup
 
12
  # -------------------
13
  # Configuration
14
  # -------------------
15
# -------------------
# Page Configuration
# -------------------
st.set_page_config(
    page_title="Your AI Buddy (LangChain)",
    layout="wide",
)
st.title("🤖 Your AI Buddy (LangChain Edition)")

# Small Hugging Face models offered in the sidebar picker
# (kept small to limit endpoint cold-start and storage cost).
HF_RECOMMENDED_MODELS = [
    "google/flan-t5-small",
    "google/flan-t5-base",
    "distilgpt2",
]
23
 
24
# -------------------
# Sidebar Settings
# -------------------
st.sidebar.title("⚙️ Settings")
provider = st.sidebar.selectbox("Provider", ["Hugging Face", "Gemini"])

system_prompt = st.sidebar.text_area(
    "System Prompt",
    "You are a helpful AI assistant. Provide concise and accurate answers."
)

# -------------------
# Chat History
# -------------------
# Conversation state is a LangChain message list whose first entry is the
# system prompt. Seed it on the first run of the session.
if "messages" not in st.session_state:
    st.session_state.messages = [SystemMessage(content=system_prompt)]
else:
    # Bug fix: previously the sidebar prompt was only read once — editing it
    # mid-conversation had no effect because the stale SystemMessage stayed
    # at index 0. Keep it in sync with the current sidebar value.
    st.session_state.messages[0] = SystemMessage(content=system_prompt)

if st.sidebar.button("Reset Conversation"):
    # Drop all turns but keep the (current) system prompt.
    st.session_state.messages = [SystemMessage(content=system_prompt)]
    st.rerun()
44
+
45
# -------------------
# Provider Setup
# -------------------
# Build the LangChain LLM/chat object for whichever backend was chosen.
llm = None

if provider == "Hugging Face":
    # Guard: cannot call the Inference API without a token.
    if not huggingface_token:
        st.error("⚠️ Missing Hugging Face token in secrets.")
        st.stop()

    selected_models = st.sidebar.multiselect(
        "Choose Hugging Face models",
        HF_RECOMMENDED_MODELS,
        default=[HF_RECOMMENDED_MODELS[0]],
    )
    if not selected_models:
        st.warning("⚠️ Please select at least one model.")
        st.stop()

    # We'll just use the first one for now
    model = selected_models[0]
    llm = HuggingFaceEndpoint(
        repo_id=model,
        huggingfacehub_api_token=huggingface_token,
        temperature=0.7,
        max_new_tokens=256,
    )

elif provider == "Gemini":
    # Guard: cannot talk to Gemini without an API key.
    if not gemini_api_key:
        st.error("⚠️ Missing Gemini API key in secrets.")
        st.stop()

    model = st.sidebar.selectbox(
        "Choose Gemini model",
        ["gemini-1.5-flash", "gemini-1.5-pro"],  # safe defaults
        index=0,
    )
    llm = ChatGoogleGenerativeAI(
        model=model,
        google_api_key=gemini_api_key,
        temperature=0.7,
    )
 
 
91
 
92
# -------------------
# Display History
# -------------------
# Re-render every prior turn; the system prompt is never shown.
for past in st.session_state.messages:
    if isinstance(past, HumanMessage):
        speaker = "user"
    elif isinstance(past, AIMessage):
        speaker = "assistant"
    else:
        continue  # SystemMessage stays hidden
    with st.chat_message(speaker):
        st.markdown(past.content)
100
 
101
# -------------------
# User Input
# -------------------
if user_input := st.chat_input("Type your message..."):
    # Echo the user's turn immediately and record it in history.
    st.chat_message("user").markdown(user_input)
    st.session_state.messages.append(HumanMessage(content=user_input))

    # Ask the active backend for a reply.
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            try:
                reply = llm.invoke(st.session_state.messages)
                # Chat models return a message object; plain LLM endpoints
                # (e.g. HuggingFaceEndpoint) return a bare string.
                if hasattr(reply, "content"):
                    bot_text = reply.content
                else:
                    bot_text = str(reply)
            except Exception as e:
                bot_text = f"⚠️ Error: {type(e).__name__}: {e}"

            st.markdown(bot_text)
            st.session_state.messages.append(AIMessage(content=bot_text))
            st.rerun()